From 8a726455aa2c11c5af95a6a42b19a6b44ac24df0 Mon Sep 17 00:00:00 2001 From: yuyi Date: Mon, 9 Sep 2024 23:13:37 +0800 Subject: [PATCH] fmt: remove the prefixed module name of the const name in same module --- cmd/tools/modules/scripting/scripting.v | 8 +- cmd/tools/modules/testing/common.v | 44 +- cmd/tools/modules/testing/output_normal.v | 4 +- .../modules/some_module/some_module.v | 8 +- .../fireworks/modules/objects/constants.v | 2 +- .../modules/sim/anim/app.v | 2 +- .../modules/sim/args/parser.v | 4 +- .../pendulum-simulation/modules/sim/params.v | 12 +- .../modules/sim/params_test.v | 27 +- .../pendulum-simulation/modules/sim/runner.v | 4 +- .../modules/sim/sim_test.v | 8 +- .../pendulum-simulation/modules/sim/worker.v | 4 +- .../modules/sim/worker_test.v | 4 +- .../particles/modules/particle/particle.v | 10 +- vlib/arrays/arrays.v | 8 +- vlib/arrays/arrays_test.v | 12 +- vlib/benchmark/benchmark.v | 10 +- vlib/bitfield/bitfield.v | 60 +-- vlib/builtin/wchar/wchar.c.v | 4 +- vlib/cli/help.v | 21 +- vlib/clipboard/clipboard_windows.c.v | 6 +- vlib/clipboard/x11/clipboard.c.v | 2 +- vlib/compress/compress.c.v | 12 +- vlib/compress/deflate/deflate_test.v | 2 +- vlib/compress/gzip/gzip.v | 12 +- vlib/compress/zstd/zstd.c.v | 16 +- vlib/compress/zstd/zstd_test.v | 2 +- vlib/context/onecontext/onecontext.v | 4 +- vlib/crypto/aes/aes.v | 16 +- vlib/crypto/bcrypt/base64.v | 14 +- vlib/crypto/bcrypt/bcrypt.v | 32 +- vlib/crypto/blake2b/blake2b.v | 46 +- vlib/crypto/blake2b/blake2b_block_test.v | 10 +- vlib/crypto/blake2s/blake2s.v | 46 +- vlib/crypto/blake2s/blake2s_block_test.v | 10 +- vlib/crypto/blake3/blake3.v | 24 +- vlib/crypto/blake3/blake3_chunk_test.v | 2 +- vlib/crypto/blake3/blake3_test.v | 8 +- vlib/crypto/des/des.v | 28 +- vlib/crypto/ed25519/ed25519.v | 24 +- .../ed25519/internal/edwards25519/element.v | 62 +-- .../internal/edwards25519/element_test.v | 22 +- .../internal/edwards25519/extra_test.v | 4 +- .../ed25519/internal/edwards25519/point.v | 20 +- 
.../internal/edwards25519/point_test.v | 4 +- .../ed25519/internal/edwards25519/scalar.v | 20 +- .../internal/edwards25519/scalar_test.v | 6 +- .../internal/edwards25519/scalarmult_test.v | 10 +- vlib/crypto/hmac/hmac.v | 6 +- vlib/crypto/hmac/hmac_test.v | 76 ++-- vlib/crypto/md5/md5.v | 22 +- vlib/crypto/pem/pem_test.v | 38 +- vlib/crypto/rand/rand_linux.c.v | 8 +- vlib/crypto/rand/rand_solaris.c.v | 8 +- vlib/crypto/rand/rand_windows.c.v | 4 +- vlib/crypto/sha1/sha1.v | 24 +- vlib/crypto/sha1/sha1block_generic.v | 10 +- vlib/crypto/sha256/sha256.v | 56 +-- vlib/crypto/sha256/sha256block_generic.v | 2 +- vlib/crypto/sha3/sha3.v | 38 +- vlib/crypto/sha3/sha3_state_generic.v | 4 +- vlib/crypto/sha3/sha3_state_test.v | 20 +- vlib/crypto/sha3/sha3_test.v | 36 +- vlib/crypto/sha512/sha512.v | 108 ++--- vlib/datatypes/bloom_filter.v | 8 +- vlib/db/mysql/stmt.c.v | 28 +- vlib/db/sqlite/sqlite.c.v | 10 +- vlib/dl/dl.v | 2 +- vlib/dl/loader/loader.v | 10 +- vlib/dlmalloc/dlmalloc.v | 78 ++-- vlib/encoding/base32/base32.v | 16 +- vlib/encoding/csv/csv_reader_random_access.v | 24 +- vlib/encoding/html/escape.v | 8 +- vlib/encoding/txtar/txtar.v | 8 +- .../utf8/east_asian/east_asian_width.v | 4 +- vlib/encoding/utf8/utf8_tables.v | 4 +- vlib/encoding/utf8/utf8_util.v | 4 +- vlib/encoding/xml/entity.v | 4 +- vlib/encoding/xml/parser.v | 30 +- vlib/encoding/xml/parser_test.v | 6 +- vlib/eventbus/eventbus.v | 4 +- vlib/flag/flag.v | 20 +- vlib/gg/draw.c.v | 4 +- vlib/gg/gg.c.v | 2 +- vlib/gg/gg.js.v | 4 +- vlib/gg/m4/matrix.v | 4 +- vlib/gg/recorder.c.v | 6 +- vlib/gx/color.v | 2 +- vlib/hash/crc32/crc32.v | 2 +- vlib/hash/fnv1a/fnv1a.v | 32 +- vlib/io/io.v | 2 +- vlib/io/reader.v | 8 +- vlib/io/reader_test.v | 8 +- vlib/io/util/util.v | 8 +- vlib/math/big/array_ops.v | 6 +- vlib/math/big/integer.v | 8 +- vlib/math/bits.v | 8 +- vlib/math/bits/bits.v | 76 ++-- vlib/math/erf.v | 64 ++- vlib/math/exp.v | 8 +- vlib/math/fractions/approximations.v | 8 +- vlib/math/invtrig.v | 8 +- 
vlib/math/math_test.v | 216 ++++----- vlib/math/modf.v | 6 +- vlib/math/pow.v | 4 +- vlib/math/sin.v | 12 +- vlib/math/tan.v | 16 +- vlib/math/tanh.v | 4 +- vlib/math/unsigned/uint128.v | 2 +- vlib/math/unsigned/uint256.v | 2 +- vlib/math/vec/vec2.v | 2 +- vlib/net/address.c.v | 14 +- vlib/net/common.c.v | 6 +- vlib/net/ftp/ftp.v | 24 +- vlib/net/html/data_structures.v | 6 +- vlib/net/html/tag_test.v | 4 +- vlib/net/http/download_progress.v | 2 +- vlib/net/http/file/folder_index.v | 2 +- vlib/net/http/file/static_server.v | 4 +- vlib/net/http/header.v | 6 +- vlib/net/http/http.v | 6 +- vlib/net/http/http_proxy_test.v | 16 +- vlib/net/http/server.v | 6 +- vlib/net/mbedtls/ssl_connection.c.v | 22 +- vlib/net/net_windows.c.v | 2 +- vlib/net/socks/socks5.v | 14 +- vlib/net/tcp.c.v | 24 +- vlib/net/tcp_read_line.c.v | 8 +- vlib/net/udp.c.v | 8 +- vlib/net/unix/common.c.v | 2 +- vlib/net/unix/stream.c.v | 18 +- vlib/net/urllib/urllib.v | 11 +- vlib/net/util.v | 2 +- vlib/net/websocket/message.v | 12 +- vlib/net/websocket/websocket_client.v | 2 +- vlib/orm/orm.v | 34 +- vlib/os/filepath.v | 34 +- vlib/os/notify/backend_darwin.c.v | 26 +- vlib/os/notify/backend_linux.c.v | 32 +- vlib/os/os.c.v | 12 +- vlib/os/os.v | 2 +- vlib/os/os_nix.c.v | 12 +- vlib/os/os_windows.c.v | 4 +- vlib/picoev/picoev.v | 32 +- vlib/picohttpparser/misc.v | 24 +- vlib/picohttpparser/picohttpparser.v | 2 +- vlib/rand/mini_math.v | 4 +- vlib/rand/mt19937/mt19937.v | 26 +- vlib/rand/rand.c.v | 8 +- vlib/rand/rand.js.v | 6 +- vlib/rand/rand.v | 12 +- vlib/rand/sys/system_rng.c.v | 20 +- vlib/rand/wyrand/wyrand.v | 4 +- vlib/rand/xoroshiro128pp/xoros128pp_test.v | 90 ++-- vlib/regex/regex.v | 423 +++++++++--------- vlib/semver/parse.v | 9 +- vlib/semver/range.v | 14 +- vlib/strconv/atof.c.v | 46 +- vlib/strconv/atofq.c.v | 8 +- vlib/strconv/atoi.v | 12 +- vlib/strconv/f32_str.c.v | 32 +- vlib/strconv/format_mem.c.v | 12 +- vlib/strconv/number_to_base.c.v | 4 +- vlib/sync/channels.c.v | 4 +- 
vlib/term/ui/color.v | 8 +- vlib/term/ui/input_windows.c.v | 2 +- vlib/term/ui/termios_nix.c.v | 2 +- vlib/term/ui/ui.c.v | 4 +- vlib/time/duration.v | 46 +- vlib/time/format.v | 6 +- vlib/time/misc/misc.v | 2 +- vlib/time/time.v | 44 +- vlib/time/time_darwin.c.v | 12 +- vlib/time/time_windows.c.v | 2 +- vlib/toml/checker/checker.v | 4 +- vlib/toml/decoder/decoder.v | 4 +- vlib/toml/parser/parser.v | 90 ++-- vlib/toml/scanner/scanner.v | 16 +- vlib/toml/toml.v | 14 +- vlib/v/ast/ast.v | 14 +- vlib/v/ast/comptime_valid_idents.v | 10 +- vlib/v/ast/str.v | 2 +- vlib/v/ast/table.v | 16 +- vlib/v/ast/types.v | 88 ++-- vlib/v/builder/cbuilder/parallel_cc.v | 6 +- vlib/v/builder/cc.v | 22 +- vlib/v/builder/msvc_windows.v | 10 +- vlib/v/cflag/cflags.v | 6 +- vlib/v/checker/checker.v | 14 +- vlib/v/checker/containers.v | 6 +- vlib/v/checker/fn.v | 6 +- vlib/v/checker/str.v | 10 +- vlib/v/doc/comment.v | 4 +- vlib/v/doc/node.v | 2 +- vlib/v/doc/utils.v | 4 +- vlib/v/fmt/fmt.v | 52 +-- ..._to_have_the_module_as_a_substring_keep.vv | 12 +- ...h_const_default_value_and_comments_keep.vv | 2 +- vlib/v/gen/c/assert.v | 4 +- vlib/v/gen/c/auto_str_methods.v | 26 +- vlib/v/gen/c/cgen.v | 60 +-- vlib/v/gen/c/cmain.v | 4 +- vlib/v/gen/c/fn.v | 14 +- vlib/v/gen/c/live.v | 4 +- vlib/v/gen/c/reflection.v | 70 +-- vlib/v/gen/c/struct.v | 2 +- .../freestanding_define/a_d_freestanding.c.v | 2 +- .../a_notd_freestanding.c.v | 2 +- vlib/v/gen/golang/golang.v | 10 +- vlib/v/gen/js/fn.v | 6 +- vlib/v/gen/js/js.v | 18 +- vlib/v/gen/js/sourcemap/source_map.v | 2 +- vlib/v/gen/js/sourcemap/vlq/vlq.v | 24 +- vlib/v/gen/js/tests/hello/hello.v | 2 +- vlib/v/gen/native/amd64.v | 10 +- vlib/v/gen/native/blacklist.v | 2 +- vlib/v/gen/native/dos.v | 2 +- vlib/v/gen/native/elf.v | 98 ++-- vlib/v/gen/native/gen.v | 6 +- vlib/v/gen/native/macho.v | 42 +- vlib/v/gen/native/pe.v | 64 +-- vlib/v/gen/native/readdll.c.v | 14 +- vlib/v/help/help.v | 2 +- vlib/v/parser/comptime.v | 6 +- vlib/v/parser/expr.v | 4 +- 
vlib/v/parser/parse_type.v | 6 +- vlib/v/parser/parser.v | 8 +- vlib/v/parser/tmpl.v | 12 +- vlib/v/parser/v_parser_test.v | 4 +- vlib/v/pkgconfig/pkgconfig.v | 4 +- vlib/v/pref/pref.v | 2 +- vlib/v/scanner/scanner.v | 72 +-- vlib/v/tests/bench/math_big_gcd/prime/maker.v | 2 +- vlib/v/tests/create_dll/create_win_dll.c.v | 2 +- .../modules/somemoduletwo/somemoduletwo.v | 2 +- .../src/modules/somemodule/somemodule.v | 2 +- vlib/v/token/token.v | 16 +- vlib/v/util/diff/diff.v | 8 +- vlib/v/util/errors.v | 14 +- vlib/v/util/quote.v | 26 +- vlib/v/util/scanning.v | 4 +- vlib/v/util/util.v | 27 +- vlib/v/util/version/version.v | 4 +- vlib/v/vmod/parser.v | 18 +- vlib/v/vmod/vmod.v | 4 +- vlib/v2/ast/ast.v | 46 +- vlib/v2/gen/v/gen.v | 4 +- vlib/v2/types/universe.v | 58 +-- vlib/veb/auth/auth.v | 2 +- vlib/veb/middleware.v | 2 +- vlib/veb/parse.v | 4 +- vlib/veb/veb_livereload.v | 4 +- vlib/vweb/assets/assets.v | 4 +- vlib/vweb/parse.v | 4 +- vlib/vweb/tests/vweb_test_server/server.v | 4 +- vlib/vweb/vweb.v | 20 +- vlib/vweb/vweb_livereload.v | 4 +- vlib/x/crypto/chacha20/chacha.v | 35 +- vlib/x/crypto/chacha20/chacha_test.v | 10 +- vlib/x/crypto/chacha20/xchacha.v | 2 +- vlib/x/crypto/chacha20/xchacha_test.v | 2 +- .../chacha20poly1305/chacha20poly1305.v | 18 +- .../chacha20poly1305/chacha20poly1305_test.v | 4 +- vlib/x/crypto/poly1305/poly1305.v | 56 +-- vlib/x/crypto/poly1305/poly1305_test.v | 6 +- vlib/x/crypto/sm4/sm4.v | 10 +- vlib/x/json2/count_test.v | 2 +- vlib/x/json2/encoder.v | 61 ++- vlib/x/json2/scanner.v | 17 +- vlib/x/sessions/sessions.v | 2 +- .../templating/dtm/dynamic_template_manager.v | 116 ++--- ...namic_template_manager_cache_system_test.v | 28 +- .../dtm/dynamic_template_manager_test.v | 34 +- vlib/x/templating/dtm/tmpl.v | 2 +- vlib/x/ttf/common.v | 2 +- vlib/x/ttf/ttf.v | 38 +- vlib/x/vweb/middleware.v | 2 +- vlib/x/vweb/parse.v | 4 +- vlib/x/vweb/vweb.v | 42 +- vlib/x/vweb/vweb_livereload.v | 4 +- 280 files changed, 2524 insertions(+), 2557 
deletions(-) diff --git a/cmd/tools/modules/scripting/scripting.v b/cmd/tools/modules/scripting/scripting.v index fd1382b579c3d8..5c5777e01ca815 100644 --- a/cmd/tools/modules/scripting/scripting.v +++ b/cmd/tools/modules/scripting/scripting.v @@ -18,7 +18,7 @@ pub fn set_verbose(on bool) { pub fn cprint(omessage string) { mut message := omessage - if scripting.term_colors { + if term_colors { message = term.cyan(message) } print(message) @@ -27,7 +27,7 @@ pub fn cprint(omessage string) { pub fn cprint_strong(omessage string) { mut message := omessage - if scripting.term_colors { + if term_colors { message = term.bright_green(message) } print(message) @@ -57,7 +57,7 @@ pub fn verbose_trace_strong(label string, omessage string) { if os.getenv('VERBOSE').len > 0 { slabel := '${time.now().format_ss_milli()} ${label}' mut message := omessage - if scripting.term_colors { + if term_colors { message = term.bright_green(message) } cprintln('# ${slabel:-43s} : ${message}') @@ -71,7 +71,7 @@ pub fn verbose_trace_exec_result(x os.Result) { lines := x.output.split_into_lines() for oline in lines { mut line := oline - if scripting.term_colors { + if term_colors { line = term.bright_green(line) } cprintln('# ${lnum:3d}: ${line}') diff --git a/cmd/tools/modules/testing/common.v b/cmd/tools/modules/testing/common.v index 294da9b96c8f25..d157aa2a685148 100644 --- a/cmd/tools/modules/testing/common.v +++ b/cmd/tools/modules/testing/common.v @@ -258,13 +258,13 @@ pub fn new_test_session(_vargs string, will_compile bool) TestSession { skip_files << 'examples/pendulum-simulation/parallel.v' skip_files << 'examples/pendulum-simulation/parallel_with_iw.v' skip_files << 'examples/pendulum-simulation/sequential.v' - if testing.github_job == 'tcc' { + if github_job == 'tcc' { // TODO: fix these by adding declarations for the missing functions in the prebuilt tcc skip_files << 'vlib/net/mbedtls/mbedtls_compiles_test.v' skip_files << 'vlib/net/ssl/ssl_compiles_test.v' } } - if 
testing.runner_os != 'Linux' || testing.github_job != 'tcc' { + if runner_os != 'Linux' || github_job != 'tcc' { if !os.exists('/usr/local/include/wkhtmltox/pdf.h') { skip_files << 'examples/c_interop_wkhtmltopdf.v' // needs installation of wkhtmltopdf from https://github.com/wkhtmltopdf/packaging/releases } @@ -275,16 +275,16 @@ pub fn new_test_session(_vargs string, will_compile bool) TestSession { $if !macos { skip_files << 'examples/macos_tray/tray.v' } - if testing.github_job == 'ubuntu-docker-musl' { + if github_job == 'ubuntu-docker-musl' { skip_files << 'vlib/net/openssl/openssl_compiles_test.c.v' skip_files << 'vlib/x/ttf/ttf_test.v' } - if testing.github_job == 'tests-sanitize-memory-clang' { + if github_job == 'tests-sanitize-memory-clang' { skip_files << 'vlib/net/openssl/openssl_compiles_test.c.v' // Fails compilation with: `/usr/bin/ld: /lib/x86_64-linux-gnu/libpthread.so.0: error adding symbols: DSO missing from command line` skip_files << 'examples/sokol/sounds/simple_sin_tones.v' } - if testing.github_job != 'misc-tooling' { + if github_job != 'misc-tooling' { // These examples need .h files that are produced from the supplied .glsl files, // using by the shader compiler tools in https://github.com/floooh/sokol-tools-bin/archive/pre-feb2021-api-changes.tar.gz skip_files << 'examples/sokol/02_cubes_glsl/cube_glsl.v' @@ -313,7 +313,7 @@ pub fn new_test_session(_vargs string, will_compile bool) TestSession { vexe: vexe vroot: vroot skip_files: skip_files - fail_fast: testing.fail_fast + fail_fast: fail_fast show_stats: '-stats' in vargs.split(' ') show_asserts: '-show-asserts' in vargs.split(' ') vargs: vargs @@ -396,14 +396,14 @@ pub fn (mut ts TestSession) test() { // Special case for android_outside_termux because of its // underscores if file.ends_with('_android_outside_termux_test.v') { - if !testing.host_os.is_target_of('android_outside_termux') { + if !host_os.is_target_of('android_outside_termux') { remaining_files << dot_relative_file 
ts.skip_files << file continue } } os_target := file.all_before_last('_test.v').all_after_last('_') - if !testing.host_os.is_target_of(os_target) { + if !host_os.is_target_of(os_target) { remaining_files << dot_relative_file ts.skip_files << file continue @@ -553,7 +553,7 @@ fn worker_trunner(mut p pool.PoolProcessor, idx int, thread_id int) voidptr { if !ts.build_tools && abs_path in ts.skip_files { ts.benchmark.skip() tls_bench.skip() - if !testing.hide_skips { + if !hide_skips { ts.append_message(.skip, tls_bench.step_message_with_label_and_duration(benchmark.b_skip, normalised_relative_file, 0, preparation: 1 * time.microsecond @@ -597,9 +597,9 @@ fn worker_trunner(mut p pool.PoolProcessor, idx int, thread_id int) voidptr { goto test_passed_system } } - time.sleep(testing.fail_retry_delay_ms) + time.sleep(fail_retry_delay_ms) } - if details.flaky && !testing.fail_flaky { + if details.flaky && !fail_flaky { ts.append_message(.info, ' *FAILURE* of the known flaky test file ${relative_file} is ignored, since VTEST_FAIL_FLAKY is 0 . Retry count: ${details.retry} .\ncmd: ${cmd}', mtc) unsafe { @@ -616,14 +616,14 @@ fn worker_trunner(mut p pool.PoolProcessor, idx int, thread_id int) voidptr { return pool.no_result } } else { - if testing.show_start { + if show_start { ts.append_message(.info, ' starting ${relative_file} ...', mtc) } ts.append_message(.compile_begin, cmd, mtc) compile_d_cmd := time.new_stopwatch() mut compile_r := os.Result{} - for cretry in 0 .. testing.max_compilation_retries { + for cretry in 0 .. 
max_compilation_retries { compile_r = os.execute(cmd) compile_cmd_duration = compile_d_cmd.elapsed() // eprintln('>>>> cretry: $cretry | compile_r.exit_code: $compile_r.exit_code | compile_cmd_duration: ${compile_cmd_duration:8} | file: $normalised_relative_file') @@ -670,7 +670,7 @@ fn worker_trunner(mut p pool.PoolProcessor, idx int, thread_id int) voidptr { // retry running at least 1 more time, to avoid CI false positives as much as possible details.retry++ } - failure_output.write_string(testing.separator) + failure_output.write_string(separator) failure_output.writeln(' retry: 0 ; max_retry: ${details.retry} ; r.exit_code: ${r.exit_code} ; trimmed_output.len: ${trimmed_output.len}') failure_output.writeln(trimmed_output) os.setenv('VTEST_RETRY_MAX', '${details.retry}', true) @@ -691,13 +691,13 @@ fn worker_trunner(mut p pool.PoolProcessor, idx int, thread_id int) voidptr { } } trimmed_output = r.output.trim_space() - failure_output.write_string(testing.separator) + failure_output.write_string(separator) failure_output.writeln(' retry: ${retry} ; max_retry: ${details.retry} ; r.exit_code: ${r.exit_code} ; trimmed_output.len: ${trimmed_output.len}') failure_output.writeln(trimmed_output) - time.sleep(testing.fail_retry_delay_ms) + time.sleep(fail_retry_delay_ms) } full_failure_output := failure_output.str().trim_space() - if details.flaky && !testing.fail_flaky { + if details.flaky && !fail_flaky { ts.append_message(.info, ' *FAILURE* of the known flaky test file ${relative_file} is ignored, since VTEST_FAIL_FLAKY is 0 . 
Retry count: ${details.retry} .\n comp_cmd: ${cmd}\n run_cmd: ${run_cmd}', mtc) unsafe { @@ -706,7 +706,7 @@ fn worker_trunner(mut p pool.PoolProcessor, idx int, thread_id int) voidptr { } ts.benchmark.fail() tls_bench.fail() - cmd_duration = d_cmd.elapsed() - (testing.fail_retry_delay_ms * details.retry) + cmd_duration = d_cmd.elapsed() - (fail_retry_delay_ms * details.retry) ts.append_message_with_duration(.fail, tls_bench.step_message_with_label_and_duration(benchmark.b_fail, '${normalised_relative_file}\n retry: ${retry}\n comp_cmd: ${cmd}\n run_cmd: ${run_cmd}\nfailure code: ${r.exit_code}; foutput.len: ${full_failure_output.len}; failure output:\n${full_failure_output}', cmd_duration, @@ -720,7 +720,7 @@ fn worker_trunner(mut p pool.PoolProcessor, idx int, thread_id int) voidptr { test_passed_execute: ts.benchmark.ok() tls_bench.ok() - if !testing.hide_oks { + if !hide_oks { ts.append_message_with_duration(.ok, tls_bench.step_message_with_label_and_duration(benchmark.b_ok, normalised_relative_file, cmd_duration, preparation: compile_cmd_duration @@ -769,7 +769,7 @@ pub fn prepare_test_session(zargs string, folder string, oskipped []string, main } } c := os.read_file(fnormalised) or { panic(err) } - start := c#[0..testing.header_bytes_to_search_for_module_main] + start := c#[0..header_bytes_to_search_for_module_main] if start.contains('module ') && !start.contains('module main') { skipped << fnormalised.replace(nparent_dir + '/', '') continue next_file @@ -834,7 +834,7 @@ pub fn building_any_v_binaries_failed() bool { continue } bmark.ok() - if !testing.hide_oks { + if !hide_oks { eprintln(bmark.step_message_ok('command: ${cmd}')) } } @@ -875,7 +875,7 @@ pub fn get_test_details(file string) TestDetails { } pub fn find_started_process(pname string) !string { - for line in testing.all_processes { + for line in all_processes { if line.contains(pname) { return line } diff --git a/cmd/tools/modules/testing/output_normal.v b/cmd/tools/modules/testing/output_normal.v 
index 0427bdd6d6c922..0281ac925651db 100644 --- a/cmd/tools/modules/testing/output_normal.v +++ b/cmd/tools/modules/testing/output_normal.v @@ -48,14 +48,14 @@ pub fn (r NormalReporter) progress(index int, message string) { // in progress mode, the last line will be rewritten many times, and does not end with \n // the \n will be printed just once when some progress has been made. pub fn (r NormalReporter) update_last_line(index int, message string) { - print('\r${testing.empty}\r${message}') + print('\r${empty}\r${message}') flush_stdout() } pub fn (r NormalReporter) update_last_line_and_move_to_next(index int, message string) { // the last \n is needed, so SKIP/FAIL messages // will not get overwritten by the OK ones - eprint('\r${testing.empty}\r${message}\n') + eprint('\r${empty}\r${message}\n') } pub fn (r NormalReporter) message(index int, message string) { diff --git a/examples/eventbus/modules/some_module/some_module.v b/examples/eventbus/modules/some_module/some_module.v index 9bd35284be19d2..add9721ed13fd0 100644 --- a/examples/eventbus/modules/some_module/some_module.v +++ b/examples/eventbus/modules/some_module/some_module.v @@ -20,13 +20,13 @@ pub fn do_work() { println('working...') if i == 5 { event_metadata := &EventMetadata{'Iteration ' + i.str()} - some_module.eb.publish('event_foo', duration, event_metadata) - some_module.eb.publish('event_bar', duration, event_metadata) + eb.publish('event_foo', duration, event_metadata) + eb.publish('event_bar', duration, event_metadata) } } - some_module.eb.publish('event_baz', &Duration{42}, &EventMetadata{'Additional data at the end.'}) + eb.publish('event_baz', &Duration{42}, &EventMetadata{'Additional data at the end.'}) } pub fn get_subscriber() eventbus.Subscriber[string] { - return *some_module.eb.subscriber + return *eb.subscriber } diff --git a/examples/fireworks/modules/objects/constants.v b/examples/fireworks/modules/objects/constants.v index a8e5abd8c07d47..05fd09f4951654 100644 --- 
a/examples/fireworks/modules/objects/constants.v +++ b/examples/fireworks/modules/objects/constants.v @@ -16,5 +16,5 @@ pub mut: const params = &UIParams{} pub fn get_params() &UIParams { - return objects.params + return params } diff --git a/examples/pendulum-simulation/modules/sim/anim/app.v b/examples/pendulum-simulation/modules/sim/anim/app.v index f1fa3e512add5a..a3c8de0eecd0be 100644 --- a/examples/pendulum-simulation/modules/sim/anim/app.v +++ b/examples/pendulum-simulation/modules/sim/anim/app.v @@ -38,7 +38,7 @@ pub fn new_app(args simargs.ParallelArgs) &App { create_window: true window_title: 'V Pendulum Simulation' user_data: app - bg_color: anim.bg_color + bg_color: bg_color frame_fn: frame init_fn: init ) diff --git a/examples/pendulum-simulation/modules/sim/args/parser.v b/examples/pendulum-simulation/modules/sim/args/parser.v index 28a40610639d68..0ff8ad56db639c 100644 --- a/examples/pendulum-simulation/modules/sim/args/parser.v +++ b/examples/pendulum-simulation/modules/sim/args/parser.v @@ -27,7 +27,7 @@ pub: pub struct ParallelArgs { SequentialArgs pub: - workers int = args.max_parallel_workers + workers int = max_parallel_workers } pub type SimArgs = ParallelArgs | SequentialArgs @@ -101,7 +101,7 @@ fn parse_parallel_args(extra_workers int) !ParallelArgs { fp.description('This is a pendulum simulation written in pure V') fp.skip_executable() - workers := fp.int('workers', 0, args.max_parallel_workers, 'amount of workers to use on simulation. Defaults to ${args.max_parallel_workers}') + workers := fp.int('workers', 0, max_parallel_workers, 'amount of workers to use on simulation. Defaults to ${max_parallel_workers}') // output parameters width := fp.int('width', `w`, sim.default_width, 'width of the image output. 
Defaults to ${sim.default_width}') diff --git a/examples/pendulum-simulation/modules/sim/params.v b/examples/pendulum-simulation/modules/sim/params.v index 827146e7ac0d53..516935785aeca8 100644 --- a/examples/pendulum-simulation/modules/sim/params.v +++ b/examples/pendulum-simulation/modules/sim/params.v @@ -12,12 +12,12 @@ pub const default_gravity = 4.9 @[params] pub struct SimParams { pub: - rope_length f64 = sim.default_rope_length - bearing_mass f64 = sim.default_bearing_mass - magnet_spacing f64 = sim.default_magnet_spacing - magnet_height f64 = sim.default_magnet_height - magnet_strength f64 = sim.default_magnet_strength - gravity f64 = sim.default_gravity + rope_length f64 = default_rope_length + bearing_mass f64 = default_bearing_mass + magnet_spacing f64 = default_magnet_spacing + magnet_height f64 = default_magnet_height + magnet_strength f64 = default_magnet_strength + gravity f64 = default_gravity } pub fn sim_params(params SimParams) SimParams { diff --git a/examples/pendulum-simulation/modules/sim/params_test.v b/examples/pendulum-simulation/modules/sim/params_test.v index 328bc6a674990d..a66e3bdb341573 100644 --- a/examples/pendulum-simulation/modules/sim/params_test.v +++ b/examples/pendulum-simulation/modules/sim/params_test.v @@ -30,7 +30,7 @@ const params_test_mock_state = SimState{ const params_test_mock_tetha = 2.0 * math.pi / 3.0 pub fn test_get_rope_vector() { - result := sim.params_test_mock_params.get_rope_vector(sim.params_test_mock_state) + result := params_test_mock_params.get_rope_vector(params_test_mock_state) expected := vector( x: -0.016957230930171364 y: -0.02937078552673521 @@ -40,7 +40,7 @@ pub fn test_get_rope_vector() { } pub fn test_get_forces_sum() { - result := sim.params_test_mock_params.get_forces_sum(sim.params_test_mock_state) + result := params_test_mock_params.get_forces_sum(params_test_mock_state) expected := vector( x: 3.410605131648481e-12 y: 5.229594535194337e-12 @@ -50,7 +50,7 @@ pub fn test_get_forces_sum() { } 
pub fn test_get_grav_force() { - result := sim.params_test_mock_params.get_grav_force(sim.params_test_mock_state) + result := params_test_mock_params.get_grav_force(params_test_mock_state) expected := vector( z: -0.147 ) @@ -58,7 +58,7 @@ pub fn test_get_grav_force() { } pub fn test_get_magnet_position() { - result := sim.params_test_mock_params.get_magnet_position(sim.params_test_mock_tetha) + result := params_test_mock_params.get_magnet_position(params_test_mock_tetha) expected := vector( x: -0.02499999999999999 y: 0.04330127018922194 @@ -68,8 +68,7 @@ pub fn test_get_magnet_position() { } pub fn test_get_magnet_force() { - result := sim.params_test_mock_params.get_magnet_force(sim.params_test_mock_tetha, - sim.params_test_mock_state) + result := params_test_mock_params.get_magnet_force(params_test_mock_tetha, params_test_mock_state) expected := vector( x: -157.4572297692556 y: 1422.736432604726 @@ -79,14 +78,13 @@ pub fn test_get_magnet_force() { } pub fn test_get_magnet_dist() { - result := sim.params_test_mock_params.get_magnet_dist(sim.params_test_mock_tetha, - sim.params_test_mock_state) + result := params_test_mock_params.get_magnet_dist(params_test_mock_tetha, params_test_mock_state) expected := 0.07993696666249227 assert result == expected } pub fn test_get_magnet1_force() { - result := sim.params_test_mock_params.get_magnet1_force(sim.params_test_mock_state) + result := params_test_mock_params.get_magnet1_force(params_test_mock_state) expected := vector( x: 1310.8545084099674 y: 575.0062553126633 @@ -96,7 +94,7 @@ pub fn test_get_magnet1_force() { } pub fn test_get_magnet2_force() { - result := sim.params_test_mock_params.get_magnet2_force(sim.params_test_mock_state) + result := params_test_mock_params.get_magnet2_force(params_test_mock_state) expected := vector( x: -157.4572297692556 y: 1422.736432604726 @@ -106,7 +104,7 @@ pub fn test_get_magnet2_force() { } pub fn test_get_magnet3_force() { - result := 
sim.params_test_mock_params.get_magnet3_force(sim.params_test_mock_state) + result := params_test_mock_params.get_magnet3_force(params_test_mock_state) expected := vector( x: -1710.46541088048 y: -2962.612996234165 @@ -116,8 +114,11 @@ pub fn test_get_magnet3_force() { } pub fn test_get_tension_force() { - result := sim.params_test_mock_params.get_tension_force(sim.params_test_mock_state, - vector(x: 0.0, y: 0.0, z: 0.0)) + result := params_test_mock_params.get_tension_force(params_test_mock_state, vector( + x: 0.0 + y: 0.0 + z: 0.0 + )) expected := vector(x: 0.0, y: 0.0, z: 0.0) assert result == expected } diff --git a/examples/pendulum-simulation/modules/sim/runner.v b/examples/pendulum-simulation/modules/sim/runner.v index 93f434f56ca048..f1a25f38d1566e 100644 --- a/examples/pendulum-simulation/modules/sim/runner.v +++ b/examples/pendulum-simulation/modules/sim/runner.v @@ -15,8 +15,8 @@ pub const default_height = 600 @[params] pub struct GridSettings { pub: - width int = sim.default_width - height int = sim.default_height + width int = default_width + height int = default_height } pub fn new_grid_settings(settings GridSettings) GridSettings { diff --git a/examples/pendulum-simulation/modules/sim/sim_test.v b/examples/pendulum-simulation/modules/sim/sim_test.v index 4551e46685aa9b..454aab422f72d0 100644 --- a/examples/pendulum-simulation/modules/sim/sim_test.v +++ b/examples/pendulum-simulation/modules/sim/sim_test.v @@ -28,10 +28,10 @@ const sim_test_mock_state = SimState{ pub fn test_satisfy_rope_constraint() { mut state := SimState{ - ...sim.sim_test_mock_state + ...sim_test_mock_state } - state.satisfy_rope_constraint(sim.sim_test_mock_params) + state.satisfy_rope_constraint(sim_test_mock_params) assert state.position.x == -0.016957230930171364 assert state.position.y == -0.02937078552673521 assert state.position.z == 0.002311063475327252 @@ -45,11 +45,11 @@ pub fn test_satisfy_rope_constraint() { pub fn test_increment() { mut state := SimState{ - 
...sim.sim_test_mock_state + ...sim_test_mock_state } delta_t := 0.0005 - state.increment(delta_t, sim.sim_test_mock_params) + state.increment(delta_t, sim_test_mock_params) assert state.position.x == -0.016957230930171364 assert state.position.y == -0.02937078552673524 assert state.position.z == 0.0023110634753272796 diff --git a/examples/pendulum-simulation/modules/sim/worker.v b/examples/pendulum-simulation/modules/sim/worker.v index 8fb2e10fc65bbe..4eead6fb868947 100644 --- a/examples/pendulum-simulation/modules/sim/worker.v +++ b/examples/pendulum-simulation/modules/sim/worker.v @@ -41,8 +41,8 @@ pub fn compute_result(request SimRequest) &SimResult { mut state := request.state params := request.params - for _ in 0 .. sim.max_iterations { - state.increment(sim.simulation_delta_t, params) + for _ in 0 .. max_iterations { + state.increment(simulation_delta_t, params) if state.done() { println('done!') break diff --git a/examples/pendulum-simulation/modules/sim/worker_test.v b/examples/pendulum-simulation/modules/sim/worker_test.v index 45f518293600a3..c0080136f76d96 100644 --- a/examples/pendulum-simulation/modules/sim/worker_test.v +++ b/examples/pendulum-simulation/modules/sim/worker_test.v @@ -29,8 +29,8 @@ const worker_test_mock_state = SimState{ fn test_compute_result() { request := SimRequest{ id: 0 - params: sim.worker_test_mock_params - state: sim.worker_test_mock_state + params: worker_test_mock_params + state: worker_test_mock_state } expected_state := SimState{ position: vector( diff --git a/examples/sokol/particles/modules/particle/particle.v b/examples/sokol/particles/modules/particle/particle.v index 120f88c416caa2..b0aaf4e5dfd460 100644 --- a/examples/sokol/particles/modules/particle/particle.v +++ b/examples/sokol/particles/modules/particle/particle.v @@ -14,9 +14,9 @@ pub fn new(location vec.Vec2[f64]) &Particle { location: location velocity: vec.Vec2[f64]{0, 0} acceleration: vec.Vec2[f64]{0, 0} - color: particle.default_v_color - life_time: 
particle.default_life_time - life_time_init: particle.default_life_time + color: default_v_color + life_time: default_life_time + life_time_init: default_life_time } return p } @@ -73,7 +73,7 @@ pub fn (mut p Particle) reset() { p.acceleration.zero() p.velocity.zero() // p.color = Color{93, 136, 193, 255} - p.color = particle.default_v_color - p.life_time = particle.default_life_time + p.color = default_v_color + p.life_time = default_life_time p.life_time_init = p.life_time } diff --git a/vlib/arrays/arrays.v b/vlib/arrays/arrays.v index 97741963c12cb8..3e058ff152abe0 100644 --- a/vlib/arrays/arrays.v +++ b/vlib/arrays/arrays.v @@ -606,19 +606,19 @@ const extra_size = 32 * isize(sizeof(usize)) fn raw_array_cap[T]() isize { size := isize(sizeof(T)) - if size > arrays.extra_size { + if size > extra_size { return 1 } else { - return arrays.extra_size / size + return extra_size / size } } fn raw_array_malloc_size[T]() isize { size := isize(sizeof(T)) - if size > arrays.extra_size { + if size > extra_size { return size * 2 } else { - return arrays.extra_size + return extra_size } } diff --git a/vlib/arrays/arrays_test.v b/vlib/arrays/arrays_test.v index dfea48592676a4..690e100e1c9ef2 100644 --- a/vlib/arrays/arrays_test.v +++ b/vlib/arrays/arrays_test.v @@ -475,13 +475,13 @@ fn test_find_first() { })? == 3, 'find element couldnt find the right element' // find struct - find_by_name := find_first(arrays.test_structs, fn (arr FindTest) bool { + find_by_name := find_first(test_structs, fn (arr FindTest) bool { return arr.name == 'one' })? assert find_by_name == FindTest{'one', 1} // not found - if _ := find_first(arrays.test_structs, fn (arr FindTest) bool { + if _ := find_first(test_structs, fn (arr FindTest) bool { return arr.name == 'nothing' }) { @@ -499,13 +499,13 @@ fn test_find_last() { })? 
== 3, 'find element couldnt find the right element' // find struct - find_by_name := find_last(arrays.test_structs, fn (arr FindTest) bool { + find_by_name := find_last(test_structs, fn (arr FindTest) bool { return arr.name == 'one' })? assert find_by_name == FindTest{'one', 4} // not found - if _ := find_last(arrays.test_structs, fn (arr FindTest) bool { + if _ := find_last(test_structs, fn (arr FindTest) bool { return arr.name == 'nothing' }) { @@ -516,10 +516,10 @@ fn test_find_last() { } fn test_join_to_string() { - assert join_to_string[FindTest](arrays.test_structs, ':', fn (it FindTest) string { + assert join_to_string[FindTest](test_structs, ':', fn (it FindTest) string { return it.name }) == 'one:two:three:one' - assert join_to_string[FindTest](arrays.test_structs, '', fn (it FindTest) string { + assert join_to_string[FindTest](test_structs, '', fn (it FindTest) string { return it.name }) == 'onetwothreeone' assert join_to_string[int]([]int{}, ':', fn (it int) string { diff --git a/vlib/benchmark/benchmark.v b/vlib/benchmark/benchmark.v index d911bf2a2494e0..8b718b58cefaea 100644 --- a/vlib/benchmark/benchmark.v +++ b/vlib/benchmark/benchmark.v @@ -133,7 +133,7 @@ pub fn start() Benchmark { pub fn (mut b Benchmark) measure(label string) i64 { b.ok() res := b.step_timer.elapsed().microseconds() - println(b.step_message_with_label(benchmark.b_spent, 'in ${label}')) + println(b.step_message_with_label(b_spent, 'in ${label}')) b.step() return res } @@ -146,7 +146,7 @@ pub fn (mut b Benchmark) measure(label string) i64 { pub fn (mut b Benchmark) record_measure(label string) i64 { b.ok() res := b.step_timer.elapsed().microseconds() - b.measured_steps << b.step_message_with_label(benchmark.b_spent, 'in ${label}') + b.measured_steps << b.step_message_with_label(b_spent, 'in ${label}') b.step_data[label] << res b.step() return res @@ -212,17 +212,17 @@ pub fn (b &Benchmark) step_message(msg string, opts MessageOptions) string { // step_message_ok returns a string 
describing the current step with an standard "OK" label. pub fn (b &Benchmark) step_message_ok(msg string, opts MessageOptions) string { - return b.step_message_with_label(benchmark.b_ok, msg, opts) + return b.step_message_with_label(b_ok, msg, opts) } // step_message_fail returns a string describing the current step with an standard "FAIL" label. pub fn (b &Benchmark) step_message_fail(msg string, opts MessageOptions) string { - return b.step_message_with_label(benchmark.b_fail, msg, opts) + return b.step_message_with_label(b_fail, msg, opts) } // step_message_skip returns a string describing the current step with an standard "SKIP" label. pub fn (b &Benchmark) step_message_skip(msg string, opts MessageOptions) string { - return b.step_message_with_label(benchmark.b_skip, msg, opts) + return b.step_message_with_label(b_skip, msg, opts) } // total_message returns a string with total summary of the benchmark run. diff --git a/vlib/bitfield/bitfield.v b/vlib/bitfield/bitfield.v index fc2e688106bdbd..82b88d691cefba 100644 --- a/vlib/bitfield/bitfield.v +++ b/vlib/bitfield/bitfield.v @@ -117,7 +117,7 @@ pub fn (instance BitField) get_bit(bitnr int) int { if bitnr >= instance.size { return 0 } - return int((instance.field[bitslot(bitnr)] >> (bitnr % bitfield.slot_size)) & u32(1)) + return int((instance.field[bitslot(bitnr)] >> (bitnr % slot_size)) & u32(1)) } // set_bit sets bit number 'bit_nr' to 1 (count from 0). 
@@ -289,7 +289,7 @@ pub fn (mut instance BitField) has(a ...int) bool { if bitnr >= instance.size { return false } - if int((instance.field[bitslot(bitnr)] >> (bitnr % bitfield.slot_size)) & u32(1)) == 1 { + if int((instance.field[bitslot(bitnr)] >> (bitnr % slot_size)) & u32(1)) == 1 { return true } } @@ -304,7 +304,7 @@ pub fn (mut instance BitField) all(a ...int) bool { if bitnr >= instance.size { return false } - if int((instance.field[bitslot(bitnr)] >> (bitnr % bitfield.slot_size)) & u32(1)) == 0 { + if int((instance.field[bitslot(bitnr)] >> (bitnr % slot_size)) & u32(1)) == 0 { return false } } @@ -374,8 +374,8 @@ pub fn join(input1 BitField, input2 BitField) BitField { output.field[i] = input1.field[i] } // find offset bit and offset slot - offset_bit := input1.size % bitfield.slot_size - offset_slot := input1.size / bitfield.slot_size + offset_bit := input1.size % slot_size + offset_slot := input1.size / slot_size for i in 0 .. zbitnslots(input2.size) { output.field[i + offset_slot] |= u32(input2.field[i] << u32(offset_bit)) } @@ -392,13 +392,13 @@ pub fn join(input1 BitField, input2 BitField) BitField { * input. * If offset_bit is zero, no additional copies needed. */ - if (output_size - 1) % bitfield.slot_size < (input2.size - 1) % bitfield.slot_size { + if (output_size - 1) % slot_size < (input2.size - 1) % slot_size { for i in 0 .. zbitnslots(input2.size) { - output.field[i + offset_slot + 1] |= u32(input2.field[i] >> u32(bitfield.slot_size - offset_bit)) + output.field[i + offset_slot + 1] |= u32(input2.field[i] >> u32(slot_size - offset_bit)) } - } else if (output_size - 1) % bitfield.slot_size > (input2.size - 1) % bitfield.slot_size { + } else if (output_size - 1) % slot_size > (input2.size - 1) % slot_size { for i in 0 .. 
zbitnslots(input2.size) - 1 { - output.field[i + offset_slot + 1] |= u32(input2.field[i] >> u32(bitfield.slot_size - offset_bit)) + output.field[i + offset_slot + 1] |= u32(input2.field[i] >> u32(slot_size - offset_bit)) } } return output @@ -437,10 +437,10 @@ pub fn (a BitField) == (b BitField) bool { pub fn (instance BitField) pop_count() int { size := instance.size bitnslots := zbitnslots(size) - tail := size % bitfield.slot_size + tail := size % slot_size mut count := 0 for i in 0 .. bitnslots - 1 { - for j in 0 .. bitfield.slot_size { + for j in 0 .. slot_size { if u32(instance.field[i] >> u32(j)) & u32(1) == u32(1) { count++ } @@ -495,16 +495,16 @@ pub fn (input BitField) slice(_start int, _end int) BitField { start = end // or panic? } mut output := new(end - start) - start_offset := start % bitfield.slot_size - end_offset := (end - 1) % bitfield.slot_size - start_slot := start / bitfield.slot_size - end_slot := (end - 1) / bitfield.slot_size + start_offset := start % slot_size + end_offset := (end - 1) % slot_size + start_slot := start / slot_size + end_slot := (end - 1) / slot_size output_slots := zbitnslots(end - start) if output_slots > 1 { if start_offset != 0 { for i in 0 .. output_slots - 1 { output.field[i] = u32(input.field[start_slot + i] >> u32(start_offset)) - output.field[i] = output.field[i] | u32(input.field[start_slot + i + 1] << u32(bitfield.slot_size - start_offset)) + output.field[i] = output.field[i] | u32(input.field[start_slot + i + 1] << u32(slot_size - start_offset)) } } else { for i in 0 .. 
output_slots - 1 { @@ -513,25 +513,25 @@ pub fn (input BitField) slice(_start int, _end int) BitField { } } if start_offset > end_offset { - output.field[(end - start - 1) / bitfield.slot_size] = u32(input.field[end_slot - 1] >> u32(start_offset)) + output.field[(end - start - 1) / slot_size] = u32(input.field[end_slot - 1] >> u32(start_offset)) mut mask := u32((1 << (end_offset + 1)) - 1) mask = input.field[end_slot] & mask - mask = u32(mask << u32(bitfield.slot_size - start_offset)) - output.field[(end - start - 1) / bitfield.slot_size] |= mask + mask = u32(mask << u32(slot_size - start_offset)) + output.field[(end - start - 1) / slot_size] |= mask } else if start_offset == 0 { mut mask := u32(0) - if end_offset == bitfield.slot_size - 1 { + if end_offset == slot_size - 1 { mask = u32(-1) } else { mask = u32(u32(1) << u32(end_offset + 1)) mask = mask - u32(1) } - output.field[(end - start - 1) / bitfield.slot_size] = (input.field[end_slot] & mask) + output.field[(end - start - 1) / slot_size] = (input.field[end_slot] & mask) } else { mut mask := u32(((1 << (end_offset - start_offset + 1)) - 1) << start_offset) mask = input.field[end_slot] & mask mask = u32(mask >> u32(start_offset)) - output.field[(end - start - 1) / bitfield.slot_size] |= mask + output.field[(end - start - 1) / slot_size] |= mask } return output } @@ -543,13 +543,13 @@ pub fn (instance BitField) reverse() BitField { bitnslots := zbitnslots(size) mut output := new(size) for i := 0; i < (bitnslots - 1); i++ { - for j in 0 .. bitfield.slot_size { + for j in 0 .. slot_size { if u32(instance.field[i] >> u32(j)) & u32(1) == u32(1) { - output.set_bit(size - i * bitfield.slot_size - j - 1) + output.set_bit(size - i * slot_size - j - 1) } } } - bits_in_last_input_slot := (size - 1) % bitfield.slot_size + 1 + bits_in_last_input_slot := (size - 1) % slot_size + 1 for j in 0 .. 
bits_in_last_input_slot { if u32(instance.field[bitnslots - 1] >> u32(j)) & u32(1) == u32(1) { output.set_bit(bits_in_last_input_slot - j - 1) @@ -569,7 +569,7 @@ pub fn (mut instance BitField) resize(new_size int) { } instance.field = field.clone() instance.size = new_size - if new_size < old_size && new_size % bitfield.slot_size != 0 { + if new_size < old_size && new_size % slot_size != 0 { instance.clear_tail() } } @@ -604,7 +604,7 @@ pub fn (instance BitField) rotate(offset int) BitField { // clear_tail clears the extra bits that are not part of the bitfield, but yet are allocated @[inline] fn (mut instance BitField) clear_tail() { - tail := instance.size % bitfield.slot_size + tail := instance.size % slot_size if tail != 0 { // create a mask for the tail mask := u32((1 << tail) - 1) @@ -616,13 +616,13 @@ fn (mut instance BitField) clear_tail() { // bitmask is the bitmask needed to access a particular bit at offset bitnr @[inline] fn bitmask(bitnr int) u32 { - return u32(u32(1) << u32(bitnr % bitfield.slot_size)) + return u32(u32(1) << u32(bitnr % slot_size)) } // bitslot is the slot index (i.e. 
the integer) where a particular bit is located @[inline] fn bitslot(size int) int { - return size / bitfield.slot_size + return size / slot_size } // min returns the minimum of 2 integers; it is here to avoid importing math just for that @@ -638,5 +638,5 @@ fn min(input1 int, input2 int) int { // zbitnslots returns the minimum number of whole integers, needed to represent a bitfield of size length @[inline] fn zbitnslots(length int) int { - return (length - 1) / bitfield.slot_size + 1 + return (length - 1) / slot_size + 1 } diff --git a/vlib/builtin/wchar/wchar.c.v b/vlib/builtin/wchar/wchar.c.v index 87e55680ce3356..f563e37eee4ba8 100644 --- a/vlib/builtin/wchar/wchar.c.v +++ b/vlib/builtin/wchar/wchar.c.v @@ -50,7 +50,7 @@ pub fn from_rune(r rune) Character { pub fn length_in_characters(p voidptr) int { mut len := 0 pc := &Character(p) - for unsafe { pc[len] != wchar.zero } { + for unsafe { pc[len] != zero } { len++ } return len @@ -114,7 +114,7 @@ pub fn from_string(s string) &Character { for i, r in srunes { result[i] = from_rune(r) } - result[srunes.len] = wchar.zero + result[srunes.len] = zero return result } } diff --git a/vlib/cli/help.v b/vlib/cli/help.v index 2ab9634f0472e1..d5b16a49e74a6e 100644 --- a/vlib/cli/help.v +++ b/vlib/cli/help.v @@ -64,26 +64,26 @@ pub fn (cmd Command) help_message() string { help += '\n${cmd.description}\n' } mut abbrev_len := 0 - mut name_len := cli.min_description_indent_len + mut name_len := min_description_indent_len if cmd.posix_mode { for flag in cmd.flags { if flag.abbrev != '' { - abbrev_len = max(abbrev_len, flag.abbrev.len + cli.spacing + 1) // + 1 for '-' in front + abbrev_len = max(abbrev_len, flag.abbrev.len + spacing + 1) // + 1 for '-' in front } - name_len = max(name_len, abbrev_len + flag.name.len + cli.spacing + 2) // + 2 for '--' in front + name_len = max(name_len, abbrev_len + flag.name.len + spacing + 2) // + 2 for '--' in front } for command in cmd.commands { - name_len = max(name_len, command.name.len + 
cli.spacing) + name_len = max(name_len, command.name.len + spacing) } } else { for flag in cmd.flags { if flag.abbrev != '' { - abbrev_len = max(abbrev_len, flag.abbrev.len + cli.spacing + 1) // + 1 for '-' in front + abbrev_len = max(abbrev_len, flag.abbrev.len + spacing + 1) // + 1 for '-' in front } - name_len = max(name_len, abbrev_len + flag.name.len + cli.spacing + 1) // + 1 for '-' in front + name_len = max(name_len, abbrev_len + flag.name.len + spacing + 1) // + 1 for '-' in front } for command in cmd.commands { - name_len = max(name_len, command.name.len + cli.spacing) + name_len = max(name_len, command.name.len + spacing) } } if cmd.flags.len > 0 { @@ -102,17 +102,16 @@ pub fn (cmd Command) help_message() string { if flag.required { required = ' (required)' } - base_indent := ' '.repeat(cli.base_indent_len) + base_indent := ' '.repeat(base_indent_len) description_indent := ' '.repeat(name_len - flag_name.len) help += '${base_indent}${flag_name}${description_indent}' + - pretty_description(flag.description + required, cli.base_indent_len + name_len) + - '\n' + pretty_description(flag.description + required, base_indent_len + name_len) + '\n' } } if cmd.commands.len > 0 { help += '\nCommands:\n' for command in cmd.commands { - base_indent := ' '.repeat(cli.base_indent_len) + base_indent := ' '.repeat(base_indent_len) description_indent := ' '.repeat(name_len - command.name.len) help += '${base_indent}${command.name}${description_indent}' + pretty_description(command.description, name_len) + '\n' diff --git a/vlib/clipboard/clipboard_windows.c.v b/vlib/clipboard/clipboard_windows.c.v index 328846cdd80972..4d43110da4ce8a 100644 --- a/vlib/clipboard/clipboard_windows.c.v +++ b/vlib/clipboard/clipboard_windows.c.v @@ -142,13 +142,13 @@ const cp_utf8 = 65001 // the string.to_wide doesn't work with SetClipboardData, don't know why fn to_wide(text string) C.HGLOBAL { - len_required := C.MultiByteToWideChar(clipboard.cp_utf8, C.MB_ERR_INVALID_CHARS, 
voidptr(text.str), + len_required := C.MultiByteToWideChar(cp_utf8, C.MB_ERR_INVALID_CHARS, voidptr(text.str), text.len + 1, C.NULL, 0) buf := C.GlobalAlloc(C.GMEM_MOVEABLE, i64(sizeof(u16)) * len_required) if buf != unsafe { nil } { mut locked := &u16(C.GlobalLock(buf)) - C.MultiByteToWideChar(clipboard.cp_utf8, C.MB_ERR_INVALID_CHARS, voidptr(text.str), - text.len + 1, locked, len_required) + C.MultiByteToWideChar(cp_utf8, C.MB_ERR_INVALID_CHARS, voidptr(text.str), text.len + 1, + locked, len_required) unsafe { locked[len_required - 1] = u16(0) } diff --git a/vlib/clipboard/x11/clipboard.c.v b/vlib/clipboard/x11/clipboard.c.v index 11e4bb6b4ecc77..7ffca3282e8ebd 100644 --- a/vlib/clipboard/x11/clipboard.c.v +++ b/vlib/clipboard/x11/clipboard.c.v @@ -390,7 +390,7 @@ fn (mut cb Clipboard) start_listener() { fn (mut cb Clipboard) intern_atoms() { cb.atoms << Atom(4) // XA_ATOM cb.atoms << Atom(31) // XA_STRING - for i, name in x11.atom_names { + for i, name in atom_names { only_if_exists := if i == int(AtomType.utf8_string) { 1 } else { 0 } cb.atoms << C.XInternAtom(cb.display, &char(name.str), only_if_exists) if i == int(AtomType.utf8_string) && cb.atoms[i] == Atom(0) { diff --git a/vlib/compress/compress.c.v b/vlib/compress/compress.c.v index e921b88a8c697a..8206874beb0fb3 100644 --- a/vlib/compress/compress.c.v +++ b/vlib/compress/compress.c.v @@ -12,8 +12,8 @@ fn C.tinfl_decompress_mem_to_heap(source_buf voidptr, source_buf_len usize, out_ // NB: this is a low level api, a high level implementation like zlib/gzip should be preferred @[manualfree] pub fn compress(data []u8, flags int) ![]u8 { - if u64(data.len) > compress.max_size { - return error('data too large (${data.len} > ${compress.max_size})') + if u64(data.len) > max_size { + return error('data too large (${data.len} > ${max_size})') } mut out_len := usize(0) @@ -21,8 +21,8 @@ pub fn compress(data []u8, flags int) ![]u8 { if address == 0 { return error('compression failed') } - if u64(out_len) > 
compress.max_size { - return error('compressed data is too large (${out_len} > ${compress.max_size})') + if u64(out_len) > max_size { + return error('compressed data is too large (${out_len} > ${max_size})') } return unsafe { address.vbytes(int(out_len)) } } @@ -37,8 +37,8 @@ pub fn decompress(data []u8, flags int) ![]u8 { if address == 0 { return error('decompression failed') } - if u64(out_len) > compress.max_size { - return error('decompressed data is too large (${out_len} > ${compress.max_size})') + if u64(out_len) > max_size { + return error('decompressed data is too large (${out_len} > ${max_size})') } return unsafe { address.vbytes(int(out_len)) } } diff --git a/vlib/compress/deflate/deflate_test.v b/vlib/compress/deflate/deflate_test.v index 34bcbbdae519e2..2db48f6c5431d1 100644 --- a/vlib/compress/deflate/deflate_test.v +++ b/vlib/compress/deflate/deflate_test.v @@ -6,7 +6,7 @@ fn test_gzip() { uncompressed := 'Hello world!' compressed := compress(uncompressed.bytes())! first2 := compressed[0..2] - assert first2 != deflate.gzip_magic_numbers + assert first2 != gzip_magic_numbers decompressed := decompress(compressed)! 
assert decompressed == uncompressed.bytes() } diff --git a/vlib/compress/gzip/gzip.v b/vlib/compress/gzip/gzip.v index 992a83192fc273..e5c8513fe0e21f 100644 --- a/vlib/compress/gzip/gzip.v +++ b/vlib/compress/gzip/gzip.v @@ -70,7 +70,7 @@ pub mut: // validate validates the header and returns its details if valid pub fn validate(data []u8, params DecompressParams) !GzipHeader { - if data.len < gzip.min_header_length { + if data.len < min_header_length { return error('data is too short, not gzip compressed?') } else if data[0] != 0x1f || data[1] != 0x8b { return error('wrong magic numbers, not gzip compressed?') @@ -83,7 +83,7 @@ pub fn validate(data []u8, params DecompressParams) !GzipHeader { // correctly, so we dont accidently decompress something that belongs // to the header - if data[3] & gzip.reserved_bits > 0 { + if data[3] & reserved_bits > 0 { // rfc 1952 2.3.1.2 Compliance // A compliant decompressor must give an error indication if any // reserved bit is non-zero, since such a bit could indicate the @@ -92,12 +92,12 @@ pub fn validate(data []u8, params DecompressParams) !GzipHeader { return error('reserved flags are set, unsupported field detected') } - if data[3] & gzip.fextra > 0 { + if data[3] & fextra > 0 { xlen := data[header.length] header.extra = data[header.length + 1..header.length + 1 + xlen] header.length += xlen + 1 } - if data[3] & gzip.fname > 0 { + if data[3] & fname > 0 { // filename is zero-terminated, so skip until we hit a zero byte for header.length < data.len && data[header.length] != 0x00 { header.filename << data[header.length] @@ -105,7 +105,7 @@ pub fn validate(data []u8, params DecompressParams) !GzipHeader { } header.length++ } - if data[3] & gzip.fcomment > 0 { + if data[3] & fcomment > 0 { // comment is zero-terminated, so skip until we hit a zero byte for header.length < data.len && data[header.length] != 0x00 { header.comment << data[header.length] @@ -113,7 +113,7 @@ pub fn validate(data []u8, params DecompressParams) 
!GzipHeader { } header.length++ } - if data[3] & gzip.fhcrc > 0 { + if data[3] & fhcrc > 0 { if header.length + 12 > data.len { return error('data too short') } diff --git a/vlib/compress/zstd/zstd.c.v b/vlib/compress/zstd/zstd.c.v index 7b7a4119e2dcb0..78b7eca279fac5 100644 --- a/vlib/compress/zstd/zstd.c.v +++ b/vlib/compress/zstd/zstd.c.v @@ -431,10 +431,10 @@ pub: // extra decompression parameters can be set by `params` // Example: decompressed := zstd.decompress(b)! pub fn decompress(data []u8, params DecompressParams) ![]u8 { - dst_capacity := C.ZSTD_getFrameContentSize(data.data, zstd.zstd_frame_header_size_max) - if dst_capacity == zstd.zstd_content_size_unknown { + dst_capacity := C.ZSTD_getFrameContentSize(data.data, zstd_frame_header_size_max) + if dst_capacity == zstd_content_size_unknown { return error('The size cannot be determined, try use streaming mode to decompress data?') - } else if dst_capacity == zstd.zstd_content_size_error { + } else if dst_capacity == zstd_content_size_error { return error('An error occurred (e.g. invalid magic number, srcSize too small)') } else if dst_capacity == 0 { return error('The frame is valid but empty') @@ -543,7 +543,7 @@ pub fn store_array[T](fname string, array []T, params CompressParams) ! { fout.close() } - mut buf_out := []u8{len: zstd.buf_out_size} + mut buf_out := []u8{len: buf_out_size} mut input := &ZSTD_inBuffer{} mut output := &ZSTD_outBuffer{} mut remaining := usize(1) @@ -554,7 +554,7 @@ pub fn store_array[T](fname string, array []T, params CompressParams) ! { input.size = 8 input.pos = 0 output.dst = buf_out.data - output.size = zstd.buf_out_size + output.size = buf_out_size output.pos = 0 remaining = cctx.compress_stream2(output, input, .zstd_e_flush) check_zstd(remaining)! @@ -564,12 +564,12 @@ pub fn store_array[T](fname string, array []T, params CompressParams) ! 
{ input.size = usize(array.len * sizeof(T)) input.pos = 0 output.dst = buf_out.data - output.size = zstd.buf_out_size + output.size = buf_out_size output.pos = 0 remaining = 1 for remaining != 0 { output.dst = buf_out.data - output.size = zstd.buf_out_size + output.size = buf_out_size output.pos = 0 remaining = cctx.compress_stream2(output, input, .zstd_e_end) check_zstd(remaining)! @@ -587,7 +587,7 @@ pub fn load_array[T](fname string, params DecompressParams) ![]T { fin.close() } - mut buf_in := []u8{len: zstd.buf_in_size} + mut buf_in := []u8{len: buf_in_size} mut len_buf := []u8{len: 8} mut input := &ZSTD_inBuffer{} mut output := &ZSTD_outBuffer{} diff --git a/vlib/compress/zstd/zstd_test.v b/vlib/compress/zstd/zstd_test.v index c365b1e94944c8..0f2785ea3f42d5 100644 --- a/vlib/compress/zstd/zstd_test.v +++ b/vlib/compress/zstd/zstd_test.v @@ -5,7 +5,7 @@ import os const samples_folder = os.join_path(os.dir(@FILE), 'samples') fn s(fname string) string { - return os.join_path(zstd.samples_folder, fname) + return os.join_path(samples_folder, fname) } fn test_zstd() { diff --git a/vlib/context/onecontext/onecontext.v b/vlib/context/onecontext/onecontext.v index b1d4704b78da89..c9c43654756f90 100644 --- a/vlib/context/onecontext/onecontext.v +++ b/vlib/context/onecontext/onecontext.v @@ -118,7 +118,7 @@ pub fn (mut octx OneContext) run_two_contexts(mut ctx1 context.Context, mut ctx2 c2done := ctx2.done() select { _ := <-octx_cancel_done { - octx.cancel(onecontext.canceled) + octx.cancel(canceled) } _ := <-c1done { octx.cancel(ctx1.err()) @@ -136,7 +136,7 @@ pub fn (mut octx OneContext) run_multiple_contexts(mut ctx context.Context) { cdone := ctx.done() select { _ := <-octx_cancel_done { - octx.cancel(onecontext.canceled) + octx.cancel(canceled) } _ := <-cdone { octx.cancel(ctx.err()) diff --git a/vlib/crypto/aes/aes.v b/vlib/crypto/aes/aes.v index cfde9f67d46a31..51043ef4e18c82 100644 --- a/vlib/crypto/aes/aes.v +++ b/vlib/crypto/aes/aes.v @@ -16,7 +16,7 @@ pub 
const block_size = 16 // handle only one block of data at a time. In most cases, you // probably want to encrypt and decrypt using [[AesCbc](#AesCbc)] struct AesCipher { - block_size int = aes.block_size + block_size int = block_size mut: enc []u32 dec []u32 @@ -55,7 +55,7 @@ pub fn new_cipher(key []u8) cipher.Block { // block_size returns the block size of the checksum in bytes. pub fn (c &AesCipher) block_size() int { - return aes.block_size + return block_size } // encrypt encrypts the first block of data in `src` to `dst`. @@ -63,14 +63,14 @@ pub fn (c &AesCipher) block_size() int { // NOTE: `dst` and `src` must both be pre-allocated to the correct length. // NOTE: `dst` and `src` may be the same (overlapping entirely). pub fn (c &AesCipher) encrypt(mut dst []u8, src []u8) { - if src.len < aes.block_size { + if src.len < block_size { panic('crypto.aes: input not full block') } - if dst.len < aes.block_size { + if dst.len < block_size { panic('crypto.aes: output not full block') } // if subtle.inexact_overlap(dst[:block_size], src[:block_size]) { - if subtle.inexact_overlap(dst[..aes.block_size], src[..aes.block_size]) { + if subtle.inexact_overlap(dst[..block_size], src[..block_size]) { panic('crypto.aes: invalid buffer overlap') } // for now use generic version @@ -82,13 +82,13 @@ pub fn (c &AesCipher) encrypt(mut dst []u8, src []u8) { // NOTE: `dst` and `src` must both be pre-allocated to the correct length. // NOTE: `dst` and `src` may be the same (overlapping entirely). 
pub fn (c &AesCipher) decrypt(mut dst []u8, src []u8) { - if src.len < aes.block_size { + if src.len < block_size { panic('crypto.aes: input not full block') } - if dst.len < aes.block_size { + if dst.len < block_size { panic('crypto.aes: output not full block') } - if subtle.inexact_overlap(dst[..aes.block_size], src[..aes.block_size]) { + if subtle.inexact_overlap(dst[..block_size], src[..block_size]) { panic('crypto.aes: invalid buffer overlap') } // for now use generic version diff --git a/vlib/crypto/bcrypt/base64.v b/vlib/crypto/bcrypt/base64.v index 4ee8af54200279..de4c17b08f5383 100644 --- a/vlib/crypto/bcrypt/base64.v +++ b/vlib/crypto/bcrypt/base64.v @@ -3,7 +3,7 @@ module bcrypt const alphabet = './ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789' fn char64(c u8) u8 { - for i, ch in bcrypt.alphabet { + for i, ch in alphabet { if ch == c { return u8(i) } @@ -69,28 +69,28 @@ fn base64_encode(data []u8) string { for src_index < data.len { mut c1 := data[src_index] src_index += 1 - result << bcrypt.alphabet[c1 >> 2] + result << alphabet[c1 >> 2] c1 = (c1 & 0x03) << 4 if src_index >= data.len { - result << bcrypt.alphabet[c1] + result << alphabet[c1] break } mut c2 := data[src_index] src_index += 1 c1 |= (c2 >> 4) & 0x0f - result << bcrypt.alphabet[c1] + result << alphabet[c1] c1 = (c2 & 0x0f) << 2 if src_index >= data.len { - result << bcrypt.alphabet[c1] + result << alphabet[c1] break } c2 = data[src_index] src_index += 1 c1 |= (c2 >> 6) & 0x03 - result << bcrypt.alphabet[c1] - result << bcrypt.alphabet[c2 & 0x3f] + result << alphabet[c1] + result << alphabet[c2 & 0x3f] } return result.bytestr() diff --git a/vlib/crypto/bcrypt/bcrypt.v b/vlib/crypto/bcrypt/bcrypt.v index 342ce6bcfa82b8..8d1b2d4eb2887b 100644 --- a/vlib/crypto/bcrypt/bcrypt.v +++ b/vlib/crypto/bcrypt/bcrypt.v @@ -67,21 +67,21 @@ pub fn compare_hash_and_password(password []u8, hashed_password []u8) ! { // generate_salt generate a string to be treated as a salt. 
pub fn generate_salt() string { - randbytes := rand.bytes(bcrypt.salt_length) or { panic(err) } + randbytes := rand.bytes(salt_length) or { panic(err) } return randbytes.bytestr() } // new_from_password converting from password to a Hashed struct with bcrypt. fn new_from_password(password []u8, cost int) !&Hashed { mut cost_ := cost - if cost < bcrypt.min_cost { - cost_ = bcrypt.default_cost + if cost < min_cost { + cost_ = default_cost } mut p := &Hashed{} - p.major = bcrypt.major_version - p.minor = bcrypt.minor_version + p.major = major_version + p.minor = minor_version - if cost_ < bcrypt.min_cost || cost_ > bcrypt.max_cost { + if cost_ < min_cost || cost_ > max_cost { return error('invalid cost') } p.cost = cost_ @@ -96,7 +96,7 @@ fn new_from_password(password []u8, cost int) !&Hashed { // new_from_hash converting from hashed data to a Hashed struct. fn new_from_hash(hashed_secret []u8) !&Hashed { mut tmp := hashed_secret.clone() - if tmp.len < bcrypt.min_hash_size { + if tmp.len < min_hash_size { return error('hash to short') } @@ -107,15 +107,15 @@ fn new_from_hash(hashed_secret []u8) !&Hashed { n = p.decode_cost(tmp) or { return err } tmp = tmp[n..].clone() - p.salt = tmp[..bcrypt.encoded_salt_size].clone() - p.hash = tmp[bcrypt.encoded_salt_size..].clone() + p.salt = tmp[..encoded_salt_size].clone() + p.hash = tmp[encoded_salt_size..].clone() return p } // bcrypt hashing passwords. 
fn bcrypt(password []u8, cost int, salt []u8) ![]u8 { - mut cipher_data := bcrypt.magic_cipher_data.clone() + mut cipher_data := magic_cipher_data.clone() mut bf := expensive_blowfish_setup(password, u32(cost), salt) or { return err } for i := 0; i < 24; i += 8 { @@ -123,7 +123,7 @@ fn bcrypt(password []u8, cost int, salt []u8) ![]u8 { bf.encrypt(mut cipher_data[i..i + 8], cipher_data[i..i + 8]) } } - hash := base64_encode(cipher_data[..bcrypt.max_crypted_hash_size]) + hash := base64_encode(cipher_data[..max_crypted_hash_size]) return hash.bytes() } @@ -165,9 +165,9 @@ fn (mut h Hashed) hash_u8() []u8 { arr[n] = `$` n++ copy(mut arr[n..], h.salt) - n += bcrypt.encoded_salt_size + n += encoded_salt_size copy(mut arr[n..], h.hash) - n += bcrypt.encoded_hash_size + n += encoded_hash_size res := arr[..n].clone() return res } @@ -177,8 +177,8 @@ fn (mut h Hashed) decode_version(sbytes []u8) !int { if sbytes[0] != `$` { return error("bcrypt hashes must start with '$'") } - if sbytes[1] != bcrypt.major_version[0] { - return error('bcrypt algorithm version ${bcrypt.major_version}') + if sbytes[1] != major_version[0] { + return error('bcrypt algorithm version ${major_version}') } h.major = sbytes[1].ascii_str() mut n := 3 @@ -199,7 +199,7 @@ fn (mut h Hashed) decode_cost(sbytes []u8) !int { // check_cost check for reasonable quantities. fn check_cost(cost int) ! { - if cost < bcrypt.min_cost || cost > bcrypt.max_cost { + if cost < min_cost || cost > max_cost { return error('invalid cost') } } diff --git a/vlib/crypto/blake2b/blake2b.v b/vlib/crypto/blake2b/blake2b.v index b825314e689d9c..2a98617f7fc0e7 100644 --- a/vlib/crypto/blake2b/blake2b.v +++ b/vlib/crypto/blake2b/blake2b.v @@ -77,42 +77,42 @@ pub fn (d Digest) str() string { // new512 initializes the digest structure for a Blake2b 512 bit hash pub fn new512() !&Digest { - return new_digest(blake2b.size512, []u8{})! + return new_digest(size512, []u8{})! 
} // new_pmac512 initializes the digest structure for a Blake2b 512 bit prefix MAC pub fn new_pmac512(key []u8) !&Digest { - return new_digest(blake2b.size512, key)! + return new_digest(size512, key)! } // new384 initializes the digest structure for a Blake2b 384 bit hash pub fn new384() !&Digest { - return new_digest(blake2b.size384, []u8{})! + return new_digest(size384, []u8{})! } // new_pmac384 initializes the digest structure for a Blake2b 384 bit prefix MAC pub fn new_pmac384(key []u8) !&Digest { - return new_digest(blake2b.size384, key)! + return new_digest(size384, key)! } // new256 initializes the digest structure for a Blake2b 256 bit hash pub fn new256() !&Digest { - return new_digest(blake2b.size256, []u8{})! + return new_digest(size256, []u8{})! } // new_pmac256 initializes the digest structure for a Blake2b 256 bit prefix MAC pub fn new_pmac256(key []u8) !&Digest { - return new_digest(blake2b.size256, key)! + return new_digest(size256, key)! } // new160 initializes the digest structure for a Blake2b 160 bit hash pub fn new160() !&Digest { - return new_digest(blake2b.size160, []u8{})! + return new_digest(size160, []u8{})! } // new_pmac160 initializes the digest structure for a Blake2b 160 bit prefix MAC pub fn new_pmac160(key []u8) !&Digest { - return new_digest(blake2b.size160, key)! + return new_digest(size160, key)! 
} struct HashSizeError { @@ -121,7 +121,7 @@ struct HashSizeError { } fn (err HashSizeError) msg() string { - return 'Hash size ${err.size} must be between 1 and ${blake2b.size512}' + return 'Hash size ${err.size} must be between 1 and ${size512}' } struct KeySizeError { @@ -130,7 +130,7 @@ struct KeySizeError { } fn (err KeySizeError) msg() string { - return 'Key size ${err.size} must be between 0 and ${blake2b.size512}' + return 'Key size ${err.size} must be between 0 and ${size512}' } struct InputBufferSizeError { @@ -139,7 +139,7 @@ struct InputBufferSizeError { } fn (err InputBufferSizeError) msg() string { - return 'The input buffer size ${err.size} .must be between 0 and ${blake2b.block_size}' + return 'The input buffer size ${err.size} .must be between 0 and ${block_size}' } // new_digest creates an initialized digest structure based on @@ -152,20 +152,20 @@ fn (err InputBufferSizeError) msg() string { // key is used for just generating a hash. A key of 1 to // 64 bytes can be used for generating a prefix MAC. pub fn new_digest(hash_size u8, key []u8) !&Digest { - if hash_size < 1 || hash_size > blake2b.size512 { + if hash_size < 1 || hash_size > size512 { return HashSizeError{ size: hash_size } } - if key.len < 0 || key.len > blake2b.size512 { + if key.len < 0 || key.len > size512 { return KeySizeError{ size: i32(key.len) } } mut d := Digest{ - h: blake2b.iv.clone() + h: iv.clone() t: unsigned.uint128_zero hash_size: hash_size } @@ -178,7 +178,7 @@ pub fn new_digest(hash_size u8, key []u8) !&Digest { d.input_buffer.clear() d.input_buffer << key - pad_length := blake2b.block_size - key.len + pad_length := block_size - key.len for _ in 0 .. pad_length { d.input_buffer << 0 } @@ -209,7 +209,7 @@ pub fn new_digest(hash_size u8, key []u8) !&Digest { fn (mut d Digest) move_input_to_message_blocks() ! { // the number of bytes in the input buffer // should never exceed the block size. 
- if d.input_buffer.len < 0 || d.input_buffer.len > blake2b.block_size { + if d.input_buffer.len < 0 || d.input_buffer.len > block_size { return InputBufferSizeError{ size: i32(d.input_buffer.len) } @@ -219,8 +219,8 @@ fn (mut d Digest) move_input_to_message_blocks() ! { d.t = d.t.add_64(u64(d.input_buffer.len)) // pad the input buffer if necessary - if d.input_buffer.len < blake2b.block_size { - pad_length := blake2b.block_size - d.input_buffer.len + if d.input_buffer.len < block_size { + pad_length := block_size - d.input_buffer.len for _ in 0 .. pad_length { d.input_buffer << 0 @@ -249,7 +249,7 @@ pub fn (mut d Digest) write(data []u8) ! { // if the input buffer is already full, // process the existing input bytes first. // this is not the final input. - if d.input_buffer.len >= blake2b.block_size { + if d.input_buffer.len >= block_size { d.move_input_to_message_blocks()! d.f(false) } @@ -257,7 +257,7 @@ pub fn (mut d Digest) write(data []u8) ! { // add data to the input buffer until you // run out of space in the input buffer or // run out of data, whichever comes first. - empty_space := blake2b.block_size - d.input_buffer.len + empty_space := block_size - d.input_buffer.len mut remaining_data := unsafe { data[..] } if empty_space >= data.len { @@ -278,7 +278,7 @@ pub fn (mut d Digest) write(data []u8) ! { // process the data in block size amounts until // all the data has been processed for remaining_data.len > 0 { - if blake2b.block_size >= remaining_data.len { + if block_size >= remaining_data.len { // running out of data // just add it to the input buffer and return d.input_buffer << remaining_data @@ -287,8 +287,8 @@ pub fn (mut d Digest) write(data []u8) ! { // add block size bytes to the input buffer // and process it - d.input_buffer << remaining_data[..blake2b.block_size] - remaining_data = unsafe { remaining_data[blake2b.block_size..] } + d.input_buffer << remaining_data[..block_size] + remaining_data = unsafe { remaining_data[block_size..] 
} d.move_input_to_message_blocks()! d.f(false) diff --git a/vlib/crypto/blake2b/blake2b_block_test.v b/vlib/crypto/blake2b/blake2b_block_test.v index b45b813bd8560a..b45c9c3a5c4fde 100644 --- a/vlib/crypto/blake2b/blake2b_block_test.v +++ b/vlib/crypto/blake2b/blake2b_block_test.v @@ -106,18 +106,18 @@ fn test_mixing_function_g() { } for i in 0 .. 16 { - assert v[i] == blake2b.expected_v_initial_results[i], 'expeccted expected_v_initial_results[${i}] ${blake2b.expected_v_initial_results[i]:016x} actual v[${i}] ${v[i]:016x}' + assert v[i] == expected_v_initial_results[i], 'expeccted expected_v_initial_results[${i}] ${expected_v_initial_results[i]:016x} actual v[${i}] ${v[i]:016x}' } for i in 0 .. 16 { - assert d.m[i] == blake2b.expected_m_results[i], 'expeccted expected_m_results[${i}] ${blake2b.expected_m_results[i]:016x} actual d.m[${i}] ${d.m[i]:016x}' + assert d.m[i] == expected_m_results[i], 'expeccted expected_m_results[${i}] ${expected_m_results[i]:016x} actual d.m[${i}] ${d.m[i]:016x}' } - for r in 0 .. blake2b.expected_v_results.len { + for r in 0 .. expected_v_results.len { d.mixing_round(mut v, sigma[r % 10]) for i in 0 .. 16 { - assert v[i] == blake2b.expected_v_results[r][i], 'expeccted expected_v_results[${r}][${i}] ${blake2b.expected_v_results[r][i]:016x} actual v[${i}] ${v[i]:016x}' + assert v[i] == expected_v_results[r][i], 'expeccted expected_v_results[${r}][${i}] ${expected_v_results[r][i]:016x} actual v[${i}] ${v[i]:016x}' } } @@ -131,6 +131,6 @@ fn test_mixing_function_g() { d.h[7] = d.h[7] ^ v[7] ^ v[15] for i in 0 .. 
8 { - assert d.h[i] == blake2b.expected_h_results[i], 'expeccted expected_h_results[${i}] ${blake2b.expected_h_results[i]:016x} actual d.h[${i}] ${d.h[i]:016x}' + assert d.h[i] == expected_h_results[i], 'expeccted expected_h_results[${i}] ${expected_h_results[i]:016x} actual d.h[${i}] ${d.h[i]:016x}' } } diff --git a/vlib/crypto/blake2s/blake2s.v b/vlib/crypto/blake2s/blake2s.v index 4007ed33510178..124dd320c4a7d0 100644 --- a/vlib/crypto/blake2s/blake2s.v +++ b/vlib/crypto/blake2s/blake2s.v @@ -76,42 +76,42 @@ pub fn (d Digest) str() string { // new256 initializes the digest structure for a Blake2s 256 bit hash pub fn new256() !&Digest { - return new_digest(blake2s.size256, []u8{})! + return new_digest(size256, []u8{})! } // new_pmac256 initializes the digest structure for a Blake2s 256 bit prefix MAC pub fn new_pmac256(key []u8) !&Digest { - return new_digest(blake2s.size256, key)! + return new_digest(size256, key)! } // new224 initializes the digest structure for a Blake2s 224 bit hash pub fn new224() !&Digest { - return new_digest(blake2s.size224, []u8{})! + return new_digest(size224, []u8{})! } // new_pmac224 initializes the digest structure for a Blake2s 224 bit prefix MAC pub fn new_pmac224(key []u8) !&Digest { - return new_digest(blake2s.size224, key)! + return new_digest(size224, key)! } // new160 initializes the digest structure for a Blake2s 160 bit hash pub fn new160() !&Digest { - return new_digest(blake2s.size160, []u8{})! + return new_digest(size160, []u8{})! } // new_pmac160 initializes the digest structure for a Blake2s 160 bit prefix MAC pub fn new_pmac160(key []u8) !&Digest { - return new_digest(blake2s.size160, key)! + return new_digest(size160, key)! } // new126 initializes the digest structure for a Blake2s 128 bit hash pub fn new128() !&Digest { - return new_digest(blake2s.size128, []u8{})! + return new_digest(size128, []u8{})! 
} // new_pmac128 initializes the digest structure for a Blake2s 128 bit prefix MAC pub fn new_pmac128(key []u8) !&Digest { - return new_digest(blake2s.size128, key)! + return new_digest(size128, key)! } struct HashSizeError { @@ -120,7 +120,7 @@ struct HashSizeError { } fn (err HashSizeError) msg() string { - return 'Hash size ${err.size} must be between 1 and ${blake2s.size256}' + return 'Hash size ${err.size} must be between 1 and ${size256}' } struct KeySizeError { @@ -129,7 +129,7 @@ struct KeySizeError { } fn (err KeySizeError) msg() string { - return 'Key size ${err.size} must be between 0 and ${blake2s.size256}' + return 'Key size ${err.size} must be between 0 and ${size256}' } struct InputBufferSizeError { @@ -138,7 +138,7 @@ struct InputBufferSizeError { } fn (err InputBufferSizeError) msg() string { - return 'The input buffer size ${err.size} .must be between 0 and ${blake2s.block_size}' + return 'The input buffer size ${err.size} .must be between 0 and ${block_size}' } // new_digest creates an initialized digest structure based on @@ -151,20 +151,20 @@ fn (err InputBufferSizeError) msg() string { // key is used for just generating a hash. A key of 1 to // 32 bytes can be used for generating a prefix MAC. pub fn new_digest(hash_size u8, key []u8) !&Digest { - if hash_size < 1 || hash_size > blake2s.size256 { + if hash_size < 1 || hash_size > size256 { return HashSizeError{ size: hash_size } } - if key.len < 0 || key.len > blake2s.size256 { + if key.len < 0 || key.len > size256 { return KeySizeError{ size: i32(key.len) } } mut d := Digest{ - h: blake2s.iv.clone() + h: iv.clone() t: 0 hash_size: hash_size } @@ -177,7 +177,7 @@ pub fn new_digest(hash_size u8, key []u8) !&Digest { d.input_buffer.clear() d.input_buffer << key - pad_length := blake2s.block_size - key.len + pad_length := block_size - key.len for _ in 0 .. 
pad_length { d.input_buffer << 0 } @@ -208,7 +208,7 @@ pub fn new_digest(hash_size u8, key []u8) !&Digest { fn (mut d Digest) move_input_to_message_blocks() ! { // the number of bytes in the input buffer // should never exceed the block size. - if d.input_buffer.len < 0 || d.input_buffer.len > blake2s.block_size { + if d.input_buffer.len < 0 || d.input_buffer.len > block_size { return InputBufferSizeError{ size: i32(d.input_buffer.len) } @@ -218,8 +218,8 @@ fn (mut d Digest) move_input_to_message_blocks() ! { d.t += u64(d.input_buffer.len) // pad the input buffer if necessary - if d.input_buffer.len < blake2s.block_size { - pad_length := blake2s.block_size - d.input_buffer.len + if d.input_buffer.len < block_size { + pad_length := block_size - d.input_buffer.len for _ in 0 .. pad_length { d.input_buffer << 0 @@ -248,7 +248,7 @@ pub fn (mut d Digest) write(data []u8) ! { // if the input buffer is already full, // process the existing input bytes first. // this is not the final input. - if d.input_buffer.len >= blake2s.block_size { + if d.input_buffer.len >= block_size { d.move_input_to_message_blocks()! d.f(false) } @@ -256,7 +256,7 @@ pub fn (mut d Digest) write(data []u8) ! { // add data to the input buffer until you // run out of space in the input buffer or // run out of data, whichever comes first. - empty_space := blake2s.block_size - d.input_buffer.len + empty_space := block_size - d.input_buffer.len mut remaining_data := unsafe { data[..] } if empty_space >= data.len { @@ -277,7 +277,7 @@ pub fn (mut d Digest) write(data []u8) ! { // process the data in block size amounts until // all the data has been processed for remaining_data.len > 0 { - if blake2s.block_size >= remaining_data.len { + if block_size >= remaining_data.len { // running out of data // just add it to the input buffer and return d.input_buffer << remaining_data @@ -286,8 +286,8 @@ pub fn (mut d Digest) write(data []u8) ! 
{ // add block size bytes to the input buffer // and process it - d.input_buffer << remaining_data[..blake2s.block_size] - remaining_data = unsafe { remaining_data[blake2s.block_size..] } + d.input_buffer << remaining_data[..block_size] + remaining_data = unsafe { remaining_data[block_size..] } d.move_input_to_message_blocks()! d.f(false) diff --git a/vlib/crypto/blake2s/blake2s_block_test.v b/vlib/crypto/blake2s/blake2s_block_test.v index 0db9e2b441e7ca..72f308f3d699f9 100644 --- a/vlib/crypto/blake2s/blake2s_block_test.v +++ b/vlib/crypto/blake2s/blake2s_block_test.v @@ -83,18 +83,18 @@ fn test_mixing_function_g() { } for i in 0 .. 16 { - assert v[i] == blake2s.expected_v_initial_results[i], 'expeccted expected_v_initial_results[${i}] ${blake2s.expected_v_initial_results[i]:08x} actual v[${i}] ${v[i]:08x}' + assert v[i] == expected_v_initial_results[i], 'expeccted expected_v_initial_results[${i}] ${expected_v_initial_results[i]:08x} actual v[${i}] ${v[i]:08x}' } for i in 0 .. 16 { - assert d.m[i] == blake2s.expected_m_results[i], 'expeccted expected_m_results[${i}] ${blake2s.expected_m_results[i]:08x} actual d.m[${i}] ${d.m[i]:08x}' + assert d.m[i] == expected_m_results[i], 'expeccted expected_m_results[${i}] ${expected_m_results[i]:08x} actual d.m[${i}] ${d.m[i]:08x}' } - for r in 0 .. blake2s.expected_v_results.len { + for r in 0 .. expected_v_results.len { d.mixing_round(mut v, sigma[r]) for i in 0 .. 16 { - assert v[i] == blake2s.expected_v_results[r][i], 'expeccted expected_v_results[${r}][${i}] ${blake2s.expected_v_results[r][i]:08x} actual v[${i}] ${v[i]:08x}' + assert v[i] == expected_v_results[r][i], 'expeccted expected_v_results[${r}][${i}] ${expected_v_results[r][i]:08x} actual v[${i}] ${v[i]:08x}' } } @@ -108,6 +108,6 @@ fn test_mixing_function_g() { d.h[7] = d.h[7] ^ v[7] ^ v[15] for i in 0 .. 
8 { - assert d.h[i] == blake2s.expected_h_results[i], 'expeccted expected_h_results[${i}] ${blake2s.expected_h_results[i]:08x} actual d.h[${i}] ${d.h[i]:08x}' + assert d.h[i] == expected_h_results[i], 'expeccted expected_h_results[${i}] ${expected_h_results[i]:08x} actual d.h[${i}] ${d.h[i]:08x}' } } diff --git a/vlib/crypto/blake3/blake3.v b/vlib/crypto/blake3/blake3.v index f3c023a65f3552..cfd66551b6c282 100644 --- a/vlib/crypto/blake3/blake3.v +++ b/vlib/crypto/blake3/blake3.v @@ -131,7 +131,7 @@ mut: // Digest.new_hash initializes a Digest structure for a Blake3 hash pub fn Digest.new_hash() !Digest { - return Digest.new(blake3.iv, 0) + return Digest.new(iv, 0) } // Digest.new_keyed_hash initializes a Digest structure for a Blake3 keyed hash @@ -147,10 +147,10 @@ pub fn Digest.new_keyed_hash(key []u8) !Digest { // Digest.new_derive_key_hash initializes a Digest structure for deriving a Blake3 key pub fn Digest.new_derive_key_hash(context []u8) !Digest { - mut context_digest := Digest.new(blake3.iv, u32(Flags.derive_key_context))! + mut context_digest := Digest.new(iv, u32(Flags.derive_key_context))! context_digest.write(context)! - context_key := context_digest.checksum_internal(blake3.key_length) + context_key := context_digest.checksum_internal(key_length) // treat the context key bytes as little endian u32 values mut key_words := []u32{len: 8, cap: 8} @@ -206,15 +206,15 @@ pub fn (mut d Digest) write(data []u8) ! { // if we have more than 1024 bytes in the input, // process it in chunks. - for d.input.len > blake3.chunk_size { + for d.input.len > chunk_size { mut chunk := Chunk{} - words := chunk.process_input(d.input[..blake3.chunk_size], d.key_words, d.chunk_counter, + words := chunk.process_input(d.input[..chunk_size], d.key_words, d.chunk_counter, d.flags, false) d.add_node(Node{ chaining_value: words[..8] }, 0) d.chunk_counter += 1 - d.input = d.input[blake3.chunk_size..] + d.input = d.input[chunk_size..] 
} } @@ -261,12 +261,12 @@ fn (mut d Digest) checksum_internal(size u64) []u8 { mut flags := d.flags | u32(Flags.parent) flags |= if i == d.binary_edge.len - 1 { u32(Flags.root) } else { u32(0) } - words = f(d.key_words, block_words, u64(0), blake3.block_size, flags) + words = f(d.key_words, block_words, u64(0), block_size, flags) state.words = words state.chaining_value = d.key_words state.block_words = block_words - state.block_len = blake3.block_size + state.block_len = block_size state.flags = flags right_node = Node{ @@ -327,7 +327,7 @@ fn (mut d Digest) add_node(node Node, level u8) { mut block_words := edge_node.chaining_value.clone() block_words << node.chaining_value - words := f(d.key_words, block_words, u64(0), blake3.block_size, d.flags | u32(Flags.parent)) + words := f(d.key_words, block_words, u64(0), block_size, d.flags | u32(Flags.parent)) parent_node := Node{ chaining_value: words[..8] } @@ -345,19 +345,19 @@ fn (mut d Digest) add_node(node Node, level u8) { pub fn sum256(data []u8) []u8 { mut d := Digest.new_hash() or { panic(err) } d.write(data) or { panic(err) } - return d.checksum_internal(blake3.size256) + return d.checksum_internal(size256) } // sum_keyed256 returns the Blake3 256 bit keyed hash of the data. 
pub fn sum_keyed256(data []u8, key []u8) []u8 { mut d := Digest.new_keyed_hash(key) or { panic(err) } d.write(data) or { panic(err) } - return d.checksum_internal(blake3.size256) + return d.checksum_internal(size256) } // sum_derived_key256 returns the Blake3 256 bit derived key hash of the key material pub fn sum_derive_key256(context []u8, key_material []u8) []u8 { mut d := Digest.new_derive_key_hash(context) or { panic(err) } d.write(key_material) or { panic(err) } - return d.checksum_internal(blake3.size256) + return d.checksum_internal(size256) } diff --git a/vlib/crypto/blake3/blake3_chunk_test.v b/vlib/crypto/blake3/blake3_chunk_test.v index 209e4fa5ad7eec..6581d554118393 100644 --- a/vlib/crypto/blake3/blake3_chunk_test.v +++ b/vlib/crypto/blake3/blake3_chunk_test.v @@ -280,7 +280,7 @@ const test_cases = [ ] fn test_various_test_cases() { - for test_case in blake3.test_cases { + for test_case in test_cases { mut chunk := Chunk{} input := test_case.input chunk.process_input(input.input_string.bytes(), input.key_words, input.chunk_number, diff --git a/vlib/crypto/blake3/blake3_test.v b/vlib/crypto/blake3/blake3_test.v index ab192d89418e38..94fde8dba066c7 100644 --- a/vlib/crypto/blake3/blake3_test.v +++ b/vlib/crypto/blake3/blake3_test.v @@ -36,10 +36,10 @@ const data_segment = []u8{len: 251, cap: 251, init: u8(index)} fn test_run_test_vectors() { mut data := []u8{} for i in 0 .. 
408 { - data << blake3.data_segment + data << data_segment } - for case in blake3.test_object.cases { + for case in test_object.cases { extended_length := u64(case.hash.len) // test Blake3 hash @@ -72,7 +72,7 @@ fn test_run_test_vectors() { // test Blake3 keyed hash - mut keyed_hash_d := Digest.new_keyed_hash(blake3.test_object.key.bytes()) or { + mut keyed_hash_d := Digest.new_keyed_hash(test_object.key.bytes()) or { assert false, 'Digest.new_keyed_hash error: ${err}' return } @@ -99,7 +99,7 @@ fn test_run_test_vectors() { assert keyed_hash_d.checksum(u64(keyed_hash_bytes.len)) == keyed_hash_bytes, 'keyed hash failed output length ${extended_length}' // test Blake3 derive key hash - mut derive_key_hash_d := Digest.new_derive_key_hash(blake3.test_object.context_string.bytes()) or { + mut derive_key_hash_d := Digest.new_derive_key_hash(test_object.context_string.bytes()) or { assert false, 'Digest.new_derive_key_hash error: ${err}' return } diff --git a/vlib/crypto/des/des.v b/vlib/crypto/des/des.v index 2df40afdc143ea..d96e4347161528 100644 --- a/vlib/crypto/des/des.v +++ b/vlib/crypto/des/des.v @@ -10,7 +10,7 @@ const block_size = 8 // A tripleDesCipher is an instance of TripleDES encryption. struct TripleDesCipher { - block_size int = des.block_size + block_size int = block_size mut: cipher1 DesCipher cipher2 DesCipher @@ -19,7 +19,7 @@ mut: // DesCipher is an instance of DES encryption. 
struct DesCipher { - block_size int = des.block_size + block_size int = block_size mut: subkeys [16]u64 } @@ -57,26 +57,26 @@ fn (mut c DesCipher) generate_subkeys(key_bytes []u8) { } pub fn (c &DesCipher) encrypt(mut dst []u8, src []u8) { - if src.len < des.block_size { + if src.len < block_size { panic('crypto/des: input not full block') } - if dst.len < des.block_size { + if dst.len < block_size { panic('crypto/des: output not full block') } - if subtle.inexact_overlap(dst[..des.block_size], src[..des.block_size]) { + if subtle.inexact_overlap(dst[..block_size], src[..block_size]) { panic('crypto/des: invalid buffer overlap') } encrypt_block(c.subkeys[..], mut dst, src) } pub fn (c &DesCipher) decrypt(mut dst []u8, src []u8) { - if src.len < des.block_size { + if src.len < block_size { panic('crypto/des: input not full block') } - if dst.len < des.block_size { + if dst.len < block_size { panic('crypto/des: output not full block') } - if subtle.inexact_overlap(dst[..des.block_size], src[..des.block_size]) { + if subtle.inexact_overlap(dst[..block_size], src[..block_size]) { panic('crypto/des: invalid buffer overlap') } decrypt_block(c.subkeys[..], mut dst, src) @@ -95,13 +95,13 @@ pub fn new_triple_des_cipher(key []u8) cipher.Block { } pub fn (c &TripleDesCipher) encrypt(mut dst []u8, src []u8) { - if src.len < des.block_size { + if src.len < block_size { panic('crypto/des: input not full block') } - if dst.len < des.block_size { + if dst.len < block_size { panic('crypto/des: output not full block') } - if subtle.inexact_overlap(dst[..des.block_size], src[..des.block_size]) { + if subtle.inexact_overlap(dst[..block_size], src[..block_size]) { panic('crypto/des: invalid buffer overlap') } @@ -131,13 +131,13 @@ pub fn (c &TripleDesCipher) encrypt(mut dst []u8, src []u8) { } pub fn (c &TripleDesCipher) decrypt(mut dst []u8, src []u8) { - if src.len < des.block_size { + if src.len < block_size { panic('crypto/des: input not full block') } - if dst.len < 
des.block_size { + if dst.len < block_size { panic('crypto/des: output not full block') } - if subtle.inexact_overlap(dst[..des.block_size], src[..des.block_size]) { + if subtle.inexact_overlap(dst[..block_size], src[..block_size]) { panic('crypto/des: invalid buffer overlap') } diff --git a/vlib/crypto/ed25519/ed25519.v b/vlib/crypto/ed25519/ed25519.v index f4c2b9daf33c3c..65467bf4dc8f73 100644 --- a/vlib/crypto/ed25519/ed25519.v +++ b/vlib/crypto/ed25519/ed25519.v @@ -31,15 +31,15 @@ pub type PrivateKey = []u8 // seed returns the private key seed corresponding to priv. // RFC 8032's private keys correspond to seeds in this module. pub fn (priv PrivateKey) seed() []u8 { - mut seed := []u8{len: ed25519.seed_size} + mut seed := []u8{len: seed_size} copy(mut seed, priv[..32]) return seed } // public_key returns the []u8 corresponding to priv. pub fn (priv PrivateKey) public_key() PublicKey { - assert priv.len == ed25519.private_key_size - mut publickey := []u8{len: ed25519.public_key_size} + assert priv.len == private_key_size + mut publickey := []u8{len: public_key_size} copy(mut publickey, priv[32..]) return PublicKey(publickey) } @@ -61,16 +61,16 @@ pub fn (priv PrivateKey) sign(message []u8) ![]u8 { // sign`signs the message with privatekey and returns a signature pub fn sign(privatekey PrivateKey, message []u8) ![]u8 { - mut signature := []u8{len: ed25519.signature_size} + mut signature := []u8{len: signature_size} sign_generic(mut signature, privatekey, message)! return signature } fn sign_generic(mut signature []u8, privatekey []u8, message []u8) ! { - if privatekey.len != ed25519.private_key_size { + if privatekey.len != private_key_size { panic('ed25519: bad private key length: ${privatekey.len}') } - seed, publickey := privatekey[..ed25519.seed_size], privatekey[ed25519.seed_size..] + seed, publickey := privatekey[..seed_size], privatekey[seed_size..] 
mut h := sha512.sum512(seed) mut s := edwards25519.new_scalar() @@ -109,11 +109,11 @@ fn sign_generic(mut signature []u8, privatekey []u8, message []u8) ! { // verify reports whether sig is a valid signature of message by publickey. pub fn verify(publickey PublicKey, message []u8, sig []u8) !bool { - if publickey.len != ed25519.public_key_size { + if publickey.len != public_key_size { return error('ed25519: bad public key length: ${publickey.len}') } - if sig.len != ed25519.signature_size || sig[63] & 224 != 0 { + if sig.len != signature_size || sig[63] & 224 != 0 { return false } @@ -145,10 +145,10 @@ pub fn verify(publickey PublicKey, message []u8, sig []u8) !bool { // generate_key generates a public/private key pair entropy using `crypto.rand`. pub fn generate_key() !(PublicKey, PrivateKey) { - mut seed := rand.bytes(ed25519.seed_size)! + mut seed := rand.bytes(seed_size)! privatekey := new_key_from_seed(seed) - mut publickey := []u8{len: ed25519.public_key_size} + mut publickey := []u8{len: public_key_size} copy(mut publickey, privatekey[32..]) return publickey, privatekey @@ -158,13 +158,13 @@ pub fn generate_key() !(PublicKey, PrivateKey) { // correspond to seeds in this module pub fn new_key_from_seed(seed []u8) PrivateKey { // Outline the function body so that the returned key can be stack-allocated. 
- mut privatekey := []u8{len: ed25519.private_key_size} + mut privatekey := []u8{len: private_key_size} new_key_from_seed_generic(mut privatekey, seed) return PrivateKey(privatekey) } fn new_key_from_seed_generic(mut privatekey []u8, seed []u8) { - if seed.len != ed25519.seed_size { + if seed.len != seed_size { panic('ed25519: bad seed length: ${seed.len}') } diff --git a/vlib/crypto/ed25519/internal/edwards25519/element.v b/vlib/crypto/ed25519/internal/edwards25519/element.v index 7dae319462c30a..5f88a936df8fdc 100644 --- a/vlib/crypto/ed25519/internal/edwards25519/element.v +++ b/vlib/crypto/ed25519/internal/edwards25519/element.v @@ -198,11 +198,11 @@ fn fe_mul_generic(a Element, b Element) Element { c3 := shift_right_by_51(r3) c4 := shift_right_by_51(r4) - rr0 := r0.lo & edwards25519.mask_low_51_bits + c4 * 19 - rr1 := r1.lo & edwards25519.mask_low_51_bits + c0 - rr2 := r2.lo & edwards25519.mask_low_51_bits + c1 - rr3 := r3.lo & edwards25519.mask_low_51_bits + c2 - rr4 := r4.lo & edwards25519.mask_low_51_bits + c3 + rr0 := r0.lo & mask_low_51_bits + c4 * 19 + rr1 := r1.lo & mask_low_51_bits + c0 + rr2 := r2.lo & mask_low_51_bits + c1 + rr3 := r3.lo & mask_low_51_bits + c2 + rr4 := r4.lo & mask_low_51_bits + c3 // Now all coefficients fit into 64-bit registers but are still too large to // be passed around as a Element. 
We therefore do one last carry chain, @@ -229,11 +229,11 @@ fn (mut v Element) carry_propagate_generic() Element { c3 := v.l3 >> 51 c4 := v.l4 >> 51 - v.l0 = v.l0 & edwards25519.mask_low_51_bits + c4 * 19 - v.l1 = v.l1 & edwards25519.mask_low_51_bits + c0 - v.l2 = v.l2 & edwards25519.mask_low_51_bits + c1 - v.l3 = v.l3 & edwards25519.mask_low_51_bits + c2 - v.l4 = v.l4 & edwards25519.mask_low_51_bits + c3 + v.l0 = v.l0 & mask_low_51_bits + c4 * 19 + v.l1 = v.l1 & mask_low_51_bits + c0 + v.l2 = v.l2 & mask_low_51_bits + c1 + v.l3 = v.l3 & mask_low_51_bits + c2 + v.l4 = v.l4 & mask_low_51_bits + c3 return v } @@ -310,11 +310,11 @@ fn fe_square_generic(a Element) Element { c3 := shift_right_by_51(r3) c4 := shift_right_by_51(r4) - rr0 := r0.lo & edwards25519.mask_low_51_bits + c4 * 19 - rr1 := r1.lo & edwards25519.mask_low_51_bits + c0 - rr2 := r2.lo & edwards25519.mask_low_51_bits + c1 - rr3 := r3.lo & edwards25519.mask_low_51_bits + c2 - rr4 := r4.lo & edwards25519.mask_low_51_bits + c3 + rr0 := r0.lo & mask_low_51_bits + c4 * 19 + rr1 := r1.lo & mask_low_51_bits + c0 + rr2 := r2.lo & mask_low_51_bits + c1 + rr3 := r3.lo & mask_low_51_bits + c2 + rr4 := r4.lo & mask_low_51_bits + c3 mut v := Element{ l0: rr0 @@ -329,13 +329,13 @@ fn fe_square_generic(a Element) Element { // zero sets v = 0, and returns v. pub fn (mut v Element) zero() Element { - v = edwards25519.fe_zero + v = fe_zero return v } // one sets v = 1, and returns v. 
pub fn (mut v Element) one() Element { - v = edwards25519.fe_one + v = fe_one return v } @@ -359,15 +359,15 @@ pub fn (mut v Element) reduce() Element { v.l0 += 19 * c v.l1 += v.l0 >> 51 - v.l0 = v.l0 & edwards25519.mask_low_51_bits + v.l0 = v.l0 & mask_low_51_bits v.l2 += v.l1 >> 51 - v.l1 = v.l1 & edwards25519.mask_low_51_bits + v.l1 = v.l1 & mask_low_51_bits v.l3 += v.l2 >> 51 - v.l2 = v.l2 & edwards25519.mask_low_51_bits + v.l2 = v.l2 & mask_low_51_bits v.l4 += v.l3 >> 51 - v.l3 = v.l3 & edwards25519.mask_low_51_bits + v.l3 = v.l3 & mask_low_51_bits // no additional carry - v.l4 = v.l4 & edwards25519.mask_low_51_bits + v.l4 = v.l4 & mask_low_51_bits return v } @@ -400,7 +400,7 @@ pub fn (mut v Element) subtract(a Element, b Element) Element { // negate sets v = -a, and returns v. pub fn (mut v Element) negate(a Element) Element { - return v.subtract(edwards25519.fe_zero, a) + return v.subtract(fe_zero, a) } // invert sets v = 1/z mod p, and returns v. @@ -493,7 +493,7 @@ pub fn (mut v Element) multiply(x Element, y Element) Element { // mul_51 returns lo + hi * 2⁵¹ = a * b. 
fn mul_51(a u64, b u32) (u64, u64) { mh, ml := bits.mul_64(a, u64(b)) - lo := ml & edwards25519.mask_low_51_bits + lo := ml & mask_low_51_bits hi := (mh << 13) | (ml >> 51) return lo, hi } @@ -568,9 +568,9 @@ pub fn (mut r Element) sqrt_ratio(u Element, v Element) (Element, int) { mut uneg := b.negate(u) correct_sign_sqrt := check.equal(u) flipped_sign_sqrt := check.equal(uneg) - flipped_sign_sqrt_i := check.equal(uneg.multiply(uneg, edwards25519.sqrt_m1)) + flipped_sign_sqrt_i := check.equal(uneg.multiply(uneg, sqrt_m1)) - rprime := b.multiply(r, edwards25519.sqrt_m1) // r_prime = SQRT_M1 * r + rprime := b.multiply(r, sqrt_m1) // r_prime = SQRT_M1 * r // r = CT_selected(r_prime IF flipped_sign_sqrt | flipped_sign_sqrt_i ELSE r) r.selected(rprime, r, flipped_sign_sqrt | flipped_sign_sqrt_i) @@ -629,20 +629,20 @@ pub fn (mut v Element) set_bytes(x []u8) !Element { // Bits 0:51 (bytes 0:8, bits 0:64, shift 0, mask 51). v.l0 = binary.little_endian_u64(x[0..8]) - v.l0 &= edwards25519.mask_low_51_bits + v.l0 &= mask_low_51_bits // Bits 51:102 (bytes 6:14, bits 48:112, shift 3, mask 51). v.l1 = binary.little_endian_u64(x[6..14]) >> 3 - v.l1 &= edwards25519.mask_low_51_bits + v.l1 &= mask_low_51_bits // Bits 102:153 (bytes 12:20, bits 96:160, shift 6, mask 51). v.l2 = binary.little_endian_u64(x[12..20]) >> 6 - v.l2 &= edwards25519.mask_low_51_bits + v.l2 &= mask_low_51_bits // Bits 153:204 (bytes 19:27, bits 152:216, shift 1, mask 51). v.l3 = binary.little_endian_u64(x[19..27]) >> 1 - v.l3 &= edwards25519.mask_low_51_bits + v.l3 &= mask_low_51_bits // Bits 204:251 (bytes 24:32, bits 192:256, shift 12, mask 51). // Note: not bytes 25:33, shift 4, to avoid overread. 
v.l4 = binary.little_endian_u64(x[24..32]) >> 12 - v.l4 &= edwards25519.mask_low_51_bits + v.l4 &= mask_low_51_bits return v } diff --git a/vlib/crypto/ed25519/internal/edwards25519/element_test.v b/vlib/crypto/ed25519/internal/edwards25519/element_test.v index 07d2896a258eb2..cfa35bdf2632c5 100644 --- a/vlib/crypto/ed25519/internal/edwards25519/element_test.v +++ b/vlib/crypto/ed25519/internal/edwards25519/element_test.v @@ -9,7 +9,7 @@ import encoding.hex const github_job = os.getenv('GITHUB_JOB') fn testsuite_begin() { - if edwards25519.github_job != '' { + if github_job != '' { // ensure that the CI does not run flaky tests: rand.seed([u32(0xffff24), 0xabcd]) } @@ -23,11 +23,11 @@ const mask_low_52_bits = (u64(1) << 52) - 1 fn generate_field_element() Element { return Element{ - l0: rand.u64() & edwards25519.mask_low_52_bits - l1: rand.u64() & edwards25519.mask_low_52_bits - l2: rand.u64() & edwards25519.mask_low_52_bits - l3: rand.u64() & edwards25519.mask_low_52_bits - l4: rand.u64() & edwards25519.mask_low_52_bits + l0: rand.u64() & mask_low_52_bits + l1: rand.u64() & mask_low_52_bits + l2: rand.u64() & mask_low_52_bits + l3: rand.u64() & mask_low_52_bits + l4: rand.u64() & mask_low_52_bits } } @@ -80,11 +80,11 @@ const weird_limbs_52 = [ fn generate_weird_field_element() Element { return Element{ - l0: edwards25519.weird_limbs_52[rand.intn(edwards25519.weird_limbs_52.len) or { 0 }] - l1: edwards25519.weird_limbs_51[rand.intn(edwards25519.weird_limbs_51.len) or { 0 }] - l2: edwards25519.weird_limbs_51[rand.intn(edwards25519.weird_limbs_51.len) or { 0 }] - l3: edwards25519.weird_limbs_51[rand.intn(edwards25519.weird_limbs_51.len) or { 0 }] - l4: edwards25519.weird_limbs_51[rand.intn(edwards25519.weird_limbs_51.len) or { 0 }] + l0: weird_limbs_52[rand.intn(weird_limbs_52.len) or { 0 }] + l1: weird_limbs_51[rand.intn(weird_limbs_51.len) or { 0 }] + l2: weird_limbs_51[rand.intn(weird_limbs_51.len) or { 0 }] + l3: weird_limbs_51[rand.intn(weird_limbs_51.len) or { 
0 }] + l4: weird_limbs_51[rand.intn(weird_limbs_51.len) or { 0 }] } } diff --git a/vlib/crypto/ed25519/internal/edwards25519/extra_test.v b/vlib/crypto/ed25519/internal/edwards25519/extra_test.v index dd61e418fd4398..753da166eea49c 100644 --- a/vlib/crypto/ed25519/internal/edwards25519/extra_test.v +++ b/vlib/crypto/ed25519/internal/edwards25519/extra_test.v @@ -7,7 +7,7 @@ import encoding.hex const github_job = os.getenv('GITHUB_JOB') fn testsuite_begin() { - if edwards25519.github_job != '' { + if github_job != '' { // ensure that the CI does not run flaky tests: rand.seed([u32(0xffff24), 0xabcd]) } @@ -73,7 +73,7 @@ fn fn_cofactor(mut data []u8) bool { panic('data.len should be 64') } mut loworder := Point{} - loworder.set_bytes(edwards25519.loworder_bytes) or { panic(err) } + loworder.set_bytes(loworder_bytes) or { panic(err) } mut s := new_scalar() mut p := Point{} diff --git a/vlib/crypto/ed25519/internal/edwards25519/point.v b/vlib/crypto/ed25519/internal/edwards25519/point.v index ded7aaab6637ad..3b1f0775308da3 100644 --- a/vlib/crypto/ed25519/internal/edwards25519/point.v +++ b/vlib/crypto/ed25519/internal/edwards25519/point.v @@ -18,20 +18,20 @@ const gen_point = generator() or { panic(err) } fn d_const_generate() !Element { mut v := Element{} - v.set_bytes(edwards25519.d_bytes)! + v.set_bytes(d_bytes)! return v } fn d2_const_generate() !Element { mut v := Element{} - v.add(edwards25519.d_const, edwards25519.d_const) + v.add(d_const, d_const) return v } // id_point_generate is the point at infinity. fn id_point_generate() !Point { mut p := Point{} - p.set_bytes(edwards25519.id_bytes)! + p.set_bytes(id_bytes)! return p } @@ -39,7 +39,7 @@ fn id_point_generate() !Point { // correspondence of this encoding with the values in RFC 8032. fn generator() !Point { mut p := Point{} - p.set_bytes(edwards25519.gen_bytes)! + p.set_bytes(gen_bytes)! 
return p } @@ -139,7 +139,7 @@ pub fn (mut v Point) set_bytes(x []u8) !Point { // v = dy² + 1 mut el3 := Element{} - mut vv := el3.multiply(y2, edwards25519.d_const) + mut vv := el3.multiply(y2, d_const) vv = vv.add(vv, fe_one) // x = +√(u/v) @@ -171,13 +171,13 @@ pub fn (mut v Point) set(u Point) Point { // new_identity_point returns a new Point set to the identity. pub fn new_identity_point() Point { mut p := Point{} - return p.set(edwards25519.id_point) + return p.set(id_point) } // new_generator_point returns a new Point set to the canonical generator. pub fn new_generator_point() Point { mut p := Point{} - return p.set(edwards25519.gen_point) + return p.set(gen_point) } fn (mut v ProjectiveCached) zero() ProjectiveCached { @@ -276,14 +276,14 @@ fn (mut v ProjectiveCached) from_p3(p Point) ProjectiveCached { v.ypx.add(p.y, p.x) v.ymx.subtract(p.y, p.x) v.z.set(p.z) - v.t2d.multiply(p.t, edwards25519.d2_const) + v.t2d.multiply(p.t, d2_const) return v } fn (mut v AffineCached) from_p3(p Point) AffineCached { v.ypx.add(p.y, p.x) v.ymx.subtract(p.y, p.x) - v.t2d.multiply(p.t, edwards25519.d2_const) + v.t2d.multiply(p.t, d2_const) mut invz := Element{} invz.invert(p.z) @@ -521,7 +521,7 @@ fn check_on_curve(points ...Point) bool { mut rhs := Element{} lhs.subtract(yy, xx) lhs.multiply(lhs, zz) - rhs.multiply(edwards25519.d_const, xx) + rhs.multiply(d_const, xx) rhs.multiply(rhs, yy) rhs.add(rhs, zzzz) diff --git a/vlib/crypto/ed25519/internal/edwards25519/point_test.v b/vlib/crypto/ed25519/internal/edwards25519/point_test.v index 2960b7716fc84a..115d1e3bcbfcef 100644 --- a/vlib/crypto/ed25519/internal/edwards25519/point_test.v +++ b/vlib/crypto/ed25519/internal/edwards25519/point_test.v @@ -10,8 +10,8 @@ fn test_invalid_encodings() { inv_bytes := hex.decode(invalid) or { panic(err) } mut p := new_generator_point() - out := p.set_bytes(inv_bytes) or { edwards25519.zero_point } - assert out == edwards25519.zero_point + out := p.set_bytes(inv_bytes) or { zero_point } + 
assert out == zero_point // assert p.equal(bgp) == 1 //not makes sense when error assert check_on_curve(p) == true diff --git a/vlib/crypto/ed25519/internal/edwards25519/scalar.v b/vlib/crypto/ed25519/internal/edwards25519/scalar.v index 5b09b842ccf942..cb0b63fddbb2bd 100644 --- a/vlib/crypto/ed25519/internal/edwards25519/scalar.v +++ b/vlib/crypto/ed25519/internal/edwards25519/scalar.v @@ -44,7 +44,7 @@ pub fn new_scalar() Scalar { // add sets s = x + y mod l, and returns s. pub fn (mut s Scalar) add(x Scalar, y Scalar) Scalar { // s = 1 * x + y mod l - sc_mul_add(mut s.s, edwards25519.sc_one.s, x.s, y.s) + sc_mul_add(mut s.s, sc_one.s, x.s, y.s) return s } @@ -57,21 +57,21 @@ pub fn (mut s Scalar) multiply_add(x Scalar, y Scalar, z Scalar) Scalar { // subtract sets s = x - y mod l, and returns s. pub fn (mut s Scalar) subtract(x Scalar, y Scalar) Scalar { // s = -1 * y + x mod l - sc_mul_add(mut s.s, edwards25519.sc_minus_one.s, y.s, x.s) + sc_mul_add(mut s.s, sc_minus_one.s, y.s, x.s) return s } // negate sets s = -x mod l, and returns s. pub fn (mut s Scalar) negate(x Scalar) Scalar { // s = -1 * x + 0 mod l - sc_mul_add(mut s.s, edwards25519.sc_minus_one.s, x.s, edwards25519.sc_zero.s) + sc_mul_add(mut s.s, sc_minus_one.s, x.s, sc_zero.s) return s } // multiply sets s = x * y mod l, and returns s. pub fn (mut s Scalar) multiply(x Scalar, y Scalar) Scalar { // s = x * y + 0 mod l - sc_mul_add(mut s.s, x.s, y.s, edwards25519.sc_zero.s) + sc_mul_add(mut s.s, x.s, y.s, sc_zero.s) return s } @@ -121,10 +121,10 @@ pub fn (mut s Scalar) set_canonical_bytes(x []u8) !Scalar { // is_reduced returns whether the given scalar is reduced modulo l. 
fn is_reduced(s Scalar) bool { for i := s.s.len - 1; i >= 0; i-- { - if s.s[i] > edwards25519.sc_minus_one.s[i] { + if s.s[i] > sc_minus_one.s[i] { return false } - if s.s[i] < edwards25519.sc_minus_one.s[i] { + if s.s[i] < sc_minus_one.s[i] { return true } /* @@ -1096,7 +1096,7 @@ fn generate_scalar(size int) !Scalar { } return reflect.ValueOf(s) */ - mut s := edwards25519.sc_zero + mut s := sc_zero diceroll := rand.intn(100) or { 0 } match true { /* @@ -1104,10 +1104,10 @@ fn generate_scalar(size int) !Scalar { case diceroll == 1: */ diceroll == 0 || diceroll == 1 { - s = edwards25519.sc_one + s = sc_one } diceroll == 2 { - s = edwards25519.sc_minus_one + s = sc_minus_one } diceroll < 5 { // rand.Read(s.s[:16]) // read random bytes and fill buf @@ -1164,7 +1164,7 @@ type NotZeroScalar = Scalar fn generate_notzero_scalar(size int) !NotZeroScalar { mut s := Scalar{} - for s == edwards25519.sc_zero { + for s == sc_zero { s = generate_scalar(size)! } return NotZeroScalar(s) diff --git a/vlib/crypto/ed25519/internal/edwards25519/scalar_test.v b/vlib/crypto/ed25519/internal/edwards25519/scalar_test.v index 6ea261c8e7b9e6..3d1ff2619e57e0 100644 --- a/vlib/crypto/ed25519/internal/edwards25519/scalar_test.v +++ b/vlib/crypto/ed25519/internal/edwards25519/scalar_test.v @@ -8,7 +8,7 @@ import math.big const github_job = os.getenv('GITHUB_JOB') fn testsuite_begin() { - if edwards25519.github_job != '' { + if github_job != '' { // ensure that the CI does not run flaky tests: rand.seed([u32(0xffff24), 0xabcd]) } @@ -108,8 +108,8 @@ fn test_scalar_set_canonical_bytes_on_noncanonical_value() { b[31] += 1 mut s := sc_one - out := s.set_canonical_bytes(b[..]) or { edwards25519.sc_error } // set_canonical_bytes shouldn't worked on a non-canonical value" - assert out == edwards25519.sc_error + out := s.set_canonical_bytes(b[..]) or { sc_error } // set_canonical_bytes shouldn't worked on a non-canonical value" + assert out == sc_error assert s == sc_one } diff --git 
a/vlib/crypto/ed25519/internal/edwards25519/scalarmult_test.v b/vlib/crypto/ed25519/internal/edwards25519/scalarmult_test.v index 2c99a91d703511..4ea16702e81e21 100644 --- a/vlib/crypto/ed25519/internal/edwards25519/scalarmult_test.v +++ b/vlib/crypto/ed25519/internal/edwards25519/scalarmult_test.v @@ -8,7 +8,7 @@ const dsc_basepoint = [u8(0xf4), 0xef, 0x7c, 0xa, 0x34, 0x55, 0x7b, 0x9f, 0x72, fn dalek_scalar_basepoint() Point { mut p := Point{} - p.set_bytes(edwards25519.dsc_basepoint) or { panic(err) } + p.set_bytes(dsc_basepoint) or { panic(err) } return p } @@ -33,7 +33,7 @@ fn test_scalar_mult_small_scalars() { fn test_scalar_mult_vs_dalek() { mut p := Point{} mut b := new_generator_point() - mut dsc := edwards25519.dalek_scalar + mut dsc := dalek_scalar p.scalar_mult(mut dsc, b) mut ds := dalek_scalar_basepoint() assert ds.equal(p) == 1 @@ -43,7 +43,7 @@ fn test_scalar_mult_vs_dalek() { fn test_scalar_base_mult_vs_dalek() { mut p := Point{} - mut dsc := edwards25519.dalek_scalar + mut dsc := dalek_scalar p.scalar_base_mult(mut dsc) mut ds := dalek_scalar_basepoint() assert ds.equal(p) == 1 @@ -55,13 +55,13 @@ fn test_vartime_double_basemult_vs_dalek() { mut p := Point{} mut z := Scalar{} b := new_generator_point() - p.vartime_double_scalar_base_mult(edwards25519.dalek_scalar, b, z) + p.vartime_double_scalar_base_mult(dalek_scalar, b, z) mut ds := dalek_scalar_basepoint() assert ds.equal(p) == 1 assert check_on_curve(p) - p.vartime_double_scalar_base_mult(z, b, edwards25519.dalek_scalar) + p.vartime_double_scalar_base_mult(z, b, dalek_scalar) assert ds.equal(p) == 1 assert check_on_curve(p) diff --git a/vlib/crypto/hmac/hmac.v b/vlib/crypto/hmac/hmac.v index 104ea973605fdb..2b54651bd073b2 100644 --- a/vlib/crypto/hmac/hmac.v +++ b/vlib/crypto/hmac/hmac.v @@ -18,16 +18,16 @@ pub fn new(key []u8, data []u8, hash_func fn ([]u8) []u8, blocksize int) []u8 { b_key = hash_func(key) } if b_key.len < blocksize { - b_key << hmac.npad[..blocksize - b_key.len] + b_key << 
npad[..blocksize - b_key.len] } mut inner := []u8{} - for i, b in hmac.ipad[..blocksize] { + for i, b in ipad[..blocksize] { inner << b_key[i] ^ b } inner << data inner_hash := hash_func(inner) mut outer := []u8{cap: b_key.len} - for i, b in hmac.opad[..blocksize] { + for i, b in opad[..blocksize] { outer << b_key[i] ^ b } outer << inner_hash diff --git a/vlib/crypto/hmac/hmac_test.v b/vlib/crypto/hmac/hmac_test.v index 9c7ec519aa593b..2e3d6c58237915 100644 --- a/vlib/crypto/hmac/hmac_test.v +++ b/vlib/crypto/hmac/hmac_test.v @@ -60,8 +60,8 @@ fn test_hmac_md5() { '6f630fad67cda0ee1fb1f562db3aa53e', ] mut result := '' - for i, key in hmac.keys { - result = new(key, hmac.data[i], md5.sum, md5.block_size).hex() + for i, key in keys { + result = new(key, data[i], md5.sum, md5.block_size).hex() assert result == md5_expected_results[i] } } @@ -77,8 +77,8 @@ fn test_hmac_sha1() { 'e8e99d0f45237d786d6bbaa7965c7808bbff1a91', ] mut result := '' - for i, key in hmac.keys { - result = new(key, hmac.data[i], sha1.sum, sha1.block_size).hex() + for i, key in keys { + result = new(key, data[i], sha1.sum, sha1.block_size).hex() assert result == sha1_expected_results[i] } } @@ -94,8 +94,8 @@ fn test_hmac_sha224() { '7358939e58683a448ac5065196d33191a1c1d33d4b8b0304dc60f5e0', ] mut result := '' - for i, key in hmac.keys { - result = new(key, hmac.data[i], sha256.sum224, sha256.block_size).hex() + for i, key in keys { + result = new(key, data[i], sha256.sum224, sha256.block_size).hex() assert result == sha224_expected_results[i] } } @@ -111,8 +111,8 @@ fn test_hmac_sha256() { '6355ac22e890d0a3c8481a5ca4825bc884d3e7a1ff98a2fc2ac7d8e064c3b2e6', ] mut result := '' - for i, key in hmac.keys { - result = new(key, hmac.data[i], sha256.sum, sha256.block_size).hex() + for i, key in keys { + result = new(key, data[i], sha256.sum, sha256.block_size).hex() assert result == sha256_expected_results[i] } } @@ -128,8 +128,8 @@ fn test_hmac_sha384() { 
'34f065bdedc2487c30a634d9a49cf42116f78bb386ea4d498aea05c0077f05373cfdaa9b59a7b0481bced9e3f55016a9', ] mut result := '' - for i, key in hmac.keys { - result = new(key, hmac.data[i], sha512.sum384, sha512.block_size).hex() + for i, key in keys { + result = new(key, data[i], sha512.sum384, sha512.block_size).hex() assert result == sha384_expected_results[i] } } @@ -145,8 +145,8 @@ fn test_hmac_sha512() { '09441cda584ed2f4d2f5b519c71baf3c79cce19dfc89a548e73b3bb382a9124d6e792b77bf57903ff5858e5d111d15f45d6fd118eea023f28d2eb234ebe62f85', ] mut result := '' - for i, key in hmac.keys { - result = new(key, hmac.data[i], sha512.sum512, sha512.block_size).hex() + for i, key in keys { + result = new(key, data[i], sha512.sum512, sha512.block_size).hex() assert result == sha512_expected_results[i] } } @@ -162,8 +162,8 @@ fn test_hmac_blake2s_256() { '467201ef5997a3442932b318083488cf9aa1d89bef2146154b4816d34863e33d', ] mut result := '' - for i, key in hmac.keys { - result = new(key, hmac.data[i], blake2s.sum256, blake2s.block_size).hex() + for i, key in keys { + result = new(key, data[i], blake2s.sum256, blake2s.block_size).hex() assert result == blake2s_256_expected_results[i] } } @@ -179,8 +179,8 @@ fn test_hmac_blake2s_224() { '17b9ebb1426a5a3dd6aa91567bd9cb9c19b3dc007adb726e55b98926', ] mut result := '' - for i, key in hmac.keys { - result = new(key, hmac.data[i], blake2s.sum224, blake2s.block_size).hex() + for i, key in keys { + result = new(key, data[i], blake2s.sum224, blake2s.block_size).hex() assert result == blake2s_224_expected_results[i] } } @@ -196,8 +196,8 @@ fn test_hmac_blake2s_160() { '6f3127fcba040fe6ea552b22c39b0fd83abca19a', ] mut result := '' - for i, key in hmac.keys { - result = new(key, hmac.data[i], blake2s.sum160, blake2s.block_size).hex() + for i, key in keys { + result = new(key, data[i], blake2s.sum160, blake2s.block_size).hex() assert result == blake2s_160_expected_results[i] } } @@ -213,8 +213,8 @@ fn test_hmac_blake2s_128() { 
'96a72e3adf5e0b02d4e6d4e8a7342a77', ] mut result := '' - for i, key in hmac.keys { - result = new(key, hmac.data[i], blake2s.sum128, blake2s.block_size).hex() + for i, key in keys { + result = new(key, data[i], blake2s.sum128, blake2s.block_size).hex() assert result == blake2s_128_expected_results[i] } } @@ -230,8 +230,8 @@ fn test_hmac_blake2b_512() { 'f1c9b64e121330c512dc31e0d4a2fc84b7ca5be64e08934a7fc4640c4a1f5cc3c1f34d811c8079cc2df65a4e5d68baf833a1ec558546abeaa7d564840618db7b', ] mut result := '' - for i, key in hmac.keys { - result = new(key, hmac.data[i], blake2b.sum512, blake2b.block_size).hex() + for i, key in keys { + result = new(key, data[i], blake2b.sum512, blake2b.block_size).hex() assert result == blake2b_512_expected_results[i] } } @@ -247,8 +247,8 @@ fn test_hmac_blake2b_384() { 'c9d0155de83454f0720b5310b4b891ddc9ab702b8260b15aa6f7291efec95b7e7a2c986019814b7c28c105c22f0ef961', ] mut result := '' - for i, key in hmac.keys { - result = new(key, hmac.data[i], blake2b.sum384, blake2b.block_size).hex() + for i, key in keys { + result = new(key, data[i], blake2b.sum384, blake2b.block_size).hex() assert result == blake2b_384_expected_results[i] } } @@ -264,8 +264,8 @@ fn test_hmac_blake2b_256() { 'dce7f41e3db51656ffc97259ca0ef3358cbfb41ac3e74e2dd9cd8639ab4996a0', ] mut result := '' - for i, key in hmac.keys { - result = new(key, hmac.data[i], blake2b.sum256, blake2b.block_size).hex() + for i, key in keys { + result = new(key, data[i], blake2b.sum256, blake2b.block_size).hex() assert result == blake2b_256_expected_results[i] } } @@ -281,8 +281,8 @@ fn test_hmac_blake2b_160() { 'fc5fb8ec933174d97c7712fa8f8802467ac42b1e', ] mut result := '' - for i, key in hmac.keys { - result = new(key, hmac.data[i], blake2b.sum160, blake2b.block_size).hex() + for i, key in keys { + result = new(key, data[i], blake2b.sum160, blake2b.block_size).hex() assert result == blake2b_160_expected_results[i] } } @@ -298,8 +298,8 @@ fn test_hmac_blake3_256() { 
'dac8165b07656b282c5b9f2f2cf22569560778cb6240b11a383f2bf466f1ba36', ] mut result := '' - for i, key in hmac.keys { - result = new(key, hmac.data[i], blake3.sum256, blake3.block_size).hex() + for i, key in keys { + result = new(key, data[i], blake3.sum256, blake3.block_size).hex() assert result == blake3_256_expected_results[i] } } @@ -316,8 +316,8 @@ fn test_hmac_sha3_512() { ] mut result := '' - for i, key in hmac.keys { - result = new(key, hmac.data[i], sha3.sum512, sha3.rate_512).hex() + for i, key in keys { + result = new(key, data[i], sha3.sum512, sha3.rate_512).hex() assert result == sha3_512_expected_results[i] } } @@ -334,8 +334,8 @@ fn test_hmac_sha3_384() { ] mut result := '' - for i, key in hmac.keys { - result = new(key, hmac.data[i], sha3.sum384, sha3.rate_384).hex() + for i, key in keys { + result = new(key, data[i], sha3.sum384, sha3.rate_384).hex() assert result == sha3_384_expected_results[i] } } @@ -352,8 +352,8 @@ fn test_hmac_sha3_256() { ] mut result := '' - for i, key in hmac.keys { - result = new(key, hmac.data[i], sha3.sum256, sha3.rate_256).hex() + for i, key in keys { + result = new(key, data[i], sha3.sum256, sha3.rate_256).hex() assert result == sha3_256_expected_results[i] } } @@ -370,8 +370,8 @@ fn test_hmac_sha3_224() { ] mut result := '' - for i, key in hmac.keys { - result = new(key, hmac.data[i], sha3.sum224, sha3.rate_224).hex() + for i, key in keys { + result = new(key, data[i], sha3.sum224, sha3.rate_224).hex() assert result == sha3_224_expected_results[i] } } diff --git a/vlib/crypto/md5/md5.v b/vlib/crypto/md5/md5.v index 143c89b9b1f352..ac3b05b3be88e0 100644 --- a/vlib/crypto/md5/md5.v +++ b/vlib/crypto/md5/md5.v @@ -40,16 +40,16 @@ pub fn (mut d Digest) free() { fn (mut d Digest) init() { d.s = []u32{len: (4)} - d.x = []u8{len: md5.block_size} + d.x = []u8{len: block_size} d.reset() } // reset the state of the Digest `d` pub fn (mut d Digest) reset() { - d.s[0] = u32(md5.init0) - d.s[1] = u32(md5.init1) - d.s[2] = 
u32(md5.init2) - d.s[3] = u32(md5.init3) + d.s[0] = u32(init0) + d.s[1] = u32(init1) + d.s[2] = u32(init2) + d.s[3] = u32(init3) d.nx = 0 d.len = 0 } @@ -78,7 +78,7 @@ pub fn (mut d Digest) write(p_ []u8) !int { if d.nx > 0 { n := copy(mut d.x[d.nx..], p) d.nx += n - if d.nx == md5.block_size { + if d.nx == block_size { block(mut d, d.x) d.nx = 0 } @@ -88,8 +88,8 @@ pub fn (mut d Digest) write(p_ []u8) !int { p = p[n..] } } - if p.len >= md5.block_size { - n := p.len & ~(md5.block_size - 1) + if p.len >= block_size { + n := p.len & ~(block_size - 1) block(mut d, p[..n]) if n >= p.len { p = [] @@ -135,7 +135,7 @@ fn (mut d Digest) checksum_internal() []u8 { if d.nx != 0 { panic('d.nx != 0') } - mut digest := []u8{len: md5.size} + mut digest := []u8{len: size} binary.little_endian_put_u32(mut digest, d.s[0]) binary.little_endian_put_u32(mut digest[4..], d.s[1]) binary.little_endian_put_u32(mut digest[8..], d.s[2]) @@ -166,12 +166,12 @@ fn block(mut dig Digest, p []u8) { // size returns the size of the checksum in bytes. pub fn (d &Digest) size() int { - return md5.size + return size } // block_size returns the block size of the checksum in bytes. pub fn (d &Digest) block_size() int { - return md5.block_size + return block_size } // hexhash returns a hexadecimal MD5 hash sum `string` of `s`. diff --git a/vlib/crypto/pem/pem_test.v b/vlib/crypto/pem/pem_test.v index 38ad8135fc396e..8dc7571f8d4450 100644 --- a/vlib/crypto/pem/pem_test.v +++ b/vlib/crypto/pem/pem_test.v @@ -2,55 +2,55 @@ module pem // example PEM structures from the RFC fn test_decode_rfc1421() { - for i in 0 .. pem.test_data_rfc1421.len { - decoded, rest := decode(pem.test_data_rfc1421[i]) or { Block{}, '' } - assert decoded == pem.expected_results_rfc1421[i] - assert decoded == decode_only(pem.test_data_rfc1421[i]) or { Block{} } + for i in 0 .. 
test_data_rfc1421.len { + decoded, rest := decode(test_data_rfc1421[i]) or { Block{}, '' } + assert decoded == expected_results_rfc1421[i] + assert decoded == decode_only(test_data_rfc1421[i]) or { Block{} } assert rest == '' } } fn test_decode() { - for i in 0 .. pem.test_data.len { - decoded, rest := decode(pem.test_data[i]) or { Block{}, '' } - assert decoded == pem.expected_results[i] - assert decoded == decode_only(pem.test_data[i]) or { Block{} } - assert rest == pem.expected_rest[i] + for i in 0 .. test_data.len { + decoded, rest := decode(test_data[i]) or { Block{}, '' } + assert decoded == expected_results[i] + assert decoded == decode_only(test_data[i]) or { Block{} } + assert rest == expected_rest[i] } } fn test_encode_rfc1421() { - for i in 0 .. pem.test_data_rfc1421.len { - encoded := pem.expected_results_rfc1421[i].encode() or { '' } + for i in 0 .. test_data_rfc1421.len { + encoded := expected_results_rfc1421[i].encode() or { '' } decoded, rest := decode(encoded) or { Block{}, '' } assert rest == '' - assert decoded == pem.expected_results_rfc1421[i] + assert decoded == expected_results_rfc1421[i] assert decoded == decode_only(encoded) or { Block{} } } } fn test_encode() { - for i in 0 .. pem.test_data.len { - encoded := pem.expected_results[i].encode() or { '' } + for i in 0 .. test_data.len { + encoded := expected_results[i].encode() or { '' } decoded, rest := decode(encoded) or { Block{}, '' } assert rest == '' - assert decoded == pem.expected_results[i] + assert decoded == expected_results[i] assert decoded == decode_only(encoded) or { Block{} } } } fn test_encode_config() { - for i in 0 .. pem.test_data.len { - encoded := pem.expected_results[i].encode(EncodeConfig{31, '\r\n'}) or { '' } + for i in 0 .. 
test_data.len { + encoded := expected_results[i].encode(EncodeConfig{31, '\r\n'}) or { '' } decoded, rest := decode(encoded) or { Block{}, '' } assert rest == '' - assert decoded == pem.expected_results[i] + assert decoded == expected_results[i] assert decoded == decode_only(encoded) or { Block{} } } } fn test_decode_no_pem() { - for test in pem.test_data_no_pem { + for test in test_data_no_pem { if _, _ := decode(test) { assert false, 'decode should return `none` on input without PEM data' } diff --git a/vlib/crypto/rand/rand_linux.c.v b/vlib/crypto/rand/rand_linux.c.v index 0f4469d41a0826..2b2f053c50197e 100644 --- a/vlib/crypto/rand/rand_linux.c.v +++ b/vlib/crypto/rand/rand_linux.c.v @@ -14,8 +14,8 @@ pub fn read(bytes_needed int) ![]u8 { mut remaining_bytes := bytes_needed // getrandom syscall wont block if requesting <= 256 bytes for bytes_read < bytes_needed { - batch_size := if remaining_bytes > rand.read_batch_size { - rand.read_batch_size + batch_size := if remaining_bytes > read_batch_size { + read_batch_size } else { remaining_bytes } @@ -30,8 +30,8 @@ pub fn read(bytes_needed int) ![]u8 { } fn getrandom(bytes_needed int, buffer voidptr) int { - if bytes_needed > rand.read_batch_size { - panic('getrandom() dont request more than ${rand.read_batch_size} bytes at once.') + if bytes_needed > read_batch_size { + panic('getrandom() dont request more than ${read_batch_size} bytes at once.') } return unsafe { C.syscall(C.SYS_getrandom, buffer, bytes_needed, 0) } } diff --git a/vlib/crypto/rand/rand_solaris.c.v b/vlib/crypto/rand/rand_solaris.c.v index 454d54390d4f5f..58dfeda8802a2b 100644 --- a/vlib/crypto/rand/rand_solaris.c.v +++ b/vlib/crypto/rand/rand_solaris.c.v @@ -17,8 +17,8 @@ pub fn read(bytes_needed int) ![]u8 { mut remaining_bytes := bytes_needed // getrandom syscall wont block if requesting <= 256 bytes for bytes_read < bytes_needed { - batch_size := if remaining_bytes > rand.read_batch_size { - rand.read_batch_size + batch_size := if 
remaining_bytes > read_batch_size { + read_batch_size } else { remaining_bytes } @@ -33,8 +33,8 @@ pub fn read(bytes_needed int) ![]u8 { } fn v_getrandom(bytes_needed int, buffer voidptr) int { - if bytes_needed > rand.read_batch_size { - panic('getrandom() dont request more than ${rand.read_batch_size} bytes at once.') + if bytes_needed > read_batch_size { + panic('getrandom() dont request more than ${read_batch_size} bytes at once.') } return C.getrandom(buffer, bytes_needed, 0) } diff --git a/vlib/crypto/rand/rand_windows.c.v b/vlib/crypto/rand/rand_windows.c.v index bb6ffd247f0292..4e9caca95e69e8 100644 --- a/vlib/crypto/rand/rand_windows.c.v +++ b/vlib/crypto/rand/rand_windows.c.v @@ -15,8 +15,8 @@ const bcrypt_use_system_preferred_rng = 0x00000002 pub fn read(bytes_needed int) ![]u8 { mut buffer := []u8{len: bytes_needed} // use bcrypt_use_system_preferred_rng because we passed null as algo - status := C.BCryptGenRandom(0, buffer.data, bytes_needed, rand.bcrypt_use_system_preferred_rng) - if status != rand.status_success { + status := C.BCryptGenRandom(0, buffer.data, bytes_needed, bcrypt_use_system_preferred_rng) + if status != status_success { return &ReadError{} } return buffer diff --git a/vlib/crypto/sha1/sha1.v b/vlib/crypto/sha1/sha1.v index a17387a151e28e..073631405966c7 100644 --- a/vlib/crypto/sha1/sha1.v +++ b/vlib/crypto/sha1/sha1.v @@ -44,18 +44,18 @@ pub fn (mut d Digest) free() { } fn (mut d Digest) init() { - d.x = []u8{len: sha1.chunk} + d.x = []u8{len: chunk} d.h = []u32{len: (5)} d.reset() } // reset the state of the Digest `d` pub fn (mut d Digest) reset() { - d.h[0] = u32(sha1.init0) - d.h[1] = u32(sha1.init1) - d.h[2] = u32(sha1.init2) - d.h[3] = u32(sha1.init3) - d.h[4] = u32(sha1.init4) + d.h[0] = u32(init0) + d.h[1] = u32(init1) + d.h[2] = u32(init2) + d.h[3] = u32(init3) + d.h[4] = u32(init4) d.nx = 0 d.len = 0 } @@ -85,7 +85,7 @@ pub fn (mut d Digest) write(p_ []u8) !int { if d.nx > 0 { n := copy(mut d.x[d.nx..], p) d.nx += n - if 
d.nx == sha1.chunk { + if d.nx == chunk { block(mut d, d.x) d.nx = 0 } @@ -95,8 +95,8 @@ pub fn (mut d Digest) write(p_ []u8) !int { p = p[n..] } } - if p.len >= sha1.chunk { - n := p.len & ~(sha1.chunk - 1) + if p.len >= chunk { + n := p.len & ~(chunk - 1) block(mut d, p[..n]) if n >= p.len { p = [] @@ -139,7 +139,7 @@ fn (mut d Digest) checksum_internal() []u8 { len <<= 3 binary.big_endian_put_u64(mut tmp, len) d.write(tmp[..8]) or { panic(err) } - mut digest := []u8{len: sha1.size} + mut digest := []u8{len: size} binary.big_endian_put_u32(mut digest, d.h[0]) binary.big_endian_put_u32(mut digest[4..], d.h[1]) binary.big_endian_put_u32(mut digest[8..], d.h[2]) @@ -171,12 +171,12 @@ fn block(mut dig Digest, p []u8) { // size returns the size of the checksum in bytes. pub fn (d &Digest) size() int { - return sha1.size + return size } // block_size returns the block size of the checksum in bytes. pub fn (d &Digest) block_size() int { - return sha1.block_size + return block_size } // hexhash returns a hexadecimal SHA1 hash sum `string` of `s`. 
diff --git a/vlib/crypto/sha1/sha1block_generic.v b/vlib/crypto/sha1/sha1block_generic.v index f2cd2438d0d527..d824fda0f59ed6 100644 --- a/vlib/crypto/sha1/sha1block_generic.v +++ b/vlib/crypto/sha1/sha1block_generic.v @@ -40,7 +40,7 @@ fn block_generic(mut dig Digest, p_ []u8) { mut i := 0 for i < 16 { f := b & c | (~b) & d - t := bits.rotate_left_32(a, 5) + f + e + w[i & 0xf] + u32(sha1._k0) + t := bits.rotate_left_32(a, 5) + f + e + w[i & 0xf] + u32(_k0) e = d d = c c = bits.rotate_left_32(b, 30) @@ -52,7 +52,7 @@ fn block_generic(mut dig Digest, p_ []u8) { tmp := w[(i - 3) & 0xf] ^ w[(i - 8) & 0xf] ^ w[(i - 14) & 0xf] ^ w[i & 0xf] w[i & 0xf] = (tmp << 1) | (tmp >> (32 - 1)) f := b & c | (~b) & d - t := bits.rotate_left_32(a, 5) + f + e + w[i & 0xf] + u32(sha1._k0) + t := bits.rotate_left_32(a, 5) + f + e + w[i & 0xf] + u32(_k0) e = d d = c c = bits.rotate_left_32(b, 30) @@ -64,7 +64,7 @@ fn block_generic(mut dig Digest, p_ []u8) { tmp := w[(i - 3) & 0xf] ^ w[(i - 8) & 0xf] ^ w[(i - 14) & 0xf] ^ w[i & 0xf] w[i & 0xf] = (tmp << 1) | (tmp >> (32 - 1)) f := b ^ c ^ d - t := bits.rotate_left_32(a, 5) + f + e + w[i & 0xf] + u32(sha1._k1) + t := bits.rotate_left_32(a, 5) + f + e + w[i & 0xf] + u32(_k1) e = d d = c c = bits.rotate_left_32(b, 30) @@ -76,7 +76,7 @@ fn block_generic(mut dig Digest, p_ []u8) { tmp := w[(i - 3) & 0xf] ^ w[(i - 8) & 0xf] ^ w[(i - 14) & 0xf] ^ w[i & 0xf] w[i & 0xf] = (tmp << 1) | (tmp >> (32 - 1)) f := ((b | c) & d) | (b & c) - t := bits.rotate_left_32(a, 5) + f + e + w[i & 0xf] + u32(sha1._k2) + t := bits.rotate_left_32(a, 5) + f + e + w[i & 0xf] + u32(_k2) e = d d = c c = bits.rotate_left_32(b, 30) @@ -88,7 +88,7 @@ fn block_generic(mut dig Digest, p_ []u8) { tmp := w[(i - 3) & 0xf] ^ w[(i - 8) & 0xf] ^ w[(i - 14) & 0xf] ^ w[i & 0xf] w[i & 0xf] = (tmp << 1) | (tmp >> (32 - 1)) f := b ^ c ^ d - t := bits.rotate_left_32(a, 5) + f + e + w[i & 0xf] + u32(sha1._k3) + t := bits.rotate_left_32(a, 5) + f + e + w[i & 0xf] + u32(_k3) e = d d = c c = 
bits.rotate_left_32(b, 30) diff --git a/vlib/crypto/sha256/sha256.v b/vlib/crypto/sha256/sha256.v index e7a4bcc73c318c..290bad38a5a32f 100644 --- a/vlib/crypto/sha256/sha256.v +++ b/vlib/crypto/sha256/sha256.v @@ -58,30 +58,30 @@ pub fn (mut d Digest) free() { fn (mut d Digest) init() { d.h = []u32{len: (8)} - d.x = []u8{len: sha256.chunk} + d.x = []u8{len: chunk} d.reset() } // reset the state of the Digest `d` pub fn (mut d Digest) reset() { if !d.is224 { - d.h[0] = u32(sha256.init0) - d.h[1] = u32(sha256.init1) - d.h[2] = u32(sha256.init2) - d.h[3] = u32(sha256.init3) - d.h[4] = u32(sha256.init4) - d.h[5] = u32(sha256.init5) - d.h[6] = u32(sha256.init6) - d.h[7] = u32(sha256.init7) + d.h[0] = u32(init0) + d.h[1] = u32(init1) + d.h[2] = u32(init2) + d.h[3] = u32(init3) + d.h[4] = u32(init4) + d.h[5] = u32(init5) + d.h[6] = u32(init6) + d.h[7] = u32(init7) } else { - d.h[0] = u32(sha256.init0_224) - d.h[1] = u32(sha256.init1_224) - d.h[2] = u32(sha256.init2_224) - d.h[3] = u32(sha256.init3_224) - d.h[4] = u32(sha256.init4_224) - d.h[5] = u32(sha256.init5_224) - d.h[6] = u32(sha256.init6_224) - d.h[7] = u32(sha256.init7_224) + d.h[0] = u32(init0_224) + d.h[1] = u32(init1_224) + d.h[2] = u32(init2_224) + d.h[3] = u32(init3_224) + d.h[4] = u32(init4_224) + d.h[5] = u32(init5_224) + d.h[6] = u32(init6_224) + d.h[7] = u32(init7_224) } d.nx = 0 d.len = 0 @@ -119,7 +119,7 @@ pub fn (mut d Digest) write(p_ []u8) !int { if d.nx > 0 { n := copy(mut d.x[d.nx..], p) d.nx += n - if d.nx == sha256.chunk { + if d.nx == chunk { block(mut d, d.x) d.nx = 0 } @@ -129,8 +129,8 @@ pub fn (mut d Digest) write(p_ []u8) !int { p = p[n..] 
} } - if p.len >= sha256.chunk { - n := p.len & ~(sha256.chunk - 1) + if p.len >= chunk { + n := p.len & ~(chunk - 1) block(mut d, p[..n]) if n >= p.len { p = [] @@ -152,7 +152,7 @@ pub fn (d &Digest) sum(b_in []u8) []u8 { hash := d0.checksum_internal() mut b_out := b_in.clone() if d0.is224 { - for b in hash[..sha256.size224] { + for b in hash[..size224] { b_out << b } } else { @@ -182,7 +182,7 @@ fn (mut d Digest) checksum_internal() []u8 { if d.nx != 0 { panic('d.nx != 0') } - mut digest := []u8{len: sha256.size} + mut digest := []u8{len: size} binary.big_endian_put_u32(mut digest, d.h[0]) binary.big_endian_put_u32(mut digest[4..], d.h[1]) binary.big_endian_put_u32(mut digest[8..], d.h[2]) @@ -204,7 +204,7 @@ pub fn (mut d Digest) checksum() []u8 { out := d.checksum_internal() // if this digest has `size224` length, return the correct `size224` checksum if d.is224 { - return out[0..sha256.size224] + return out[0..size224] } // otherwise, returns a normal size return out @@ -228,8 +228,8 @@ pub fn sum224(data []u8) []u8 { mut d := new224() d.write(data) or { panic(err) } sum := d.checksum_internal() - mut sum224 := []u8{len: sha256.size224} - copy(mut sum224, sum[..sha256.size224]) + mut sum224 := []u8{len: size224} + copy(mut sum224, sum[..size224]) return sum224 } @@ -242,14 +242,14 @@ fn block(mut dig Digest, p []u8) { // size returns the size of the checksum in bytes. pub fn (d &Digest) size() int { if !d.is224 { - return sha256.size + return size } - return sha256.size224 + return size224 } // block_size returns the block size of the checksum in bytes. pub fn (d &Digest) block_size() int { - return sha256.block_size + return block_size } // hexhash returns a hexadecimal SHA256 hash sum `string` of `s`. 
diff --git a/vlib/crypto/sha256/sha256block_generic.v b/vlib/crypto/sha256/sha256block_generic.v index 31b17e3ce6b4be..0b8cfc1ca4cbcb 100644 --- a/vlib/crypto/sha256/sha256block_generic.v +++ b/vlib/crypto/sha256/sha256block_generic.v @@ -113,7 +113,7 @@ fn block_generic(mut dig Digest, p_ []u8) { for i in 0 .. 64 { t1 := h + ((bits.rotate_left_32(e, -6)) ^ (bits.rotate_left_32(e, -11)) ^ (bits.rotate_left_32(e, -25))) + - ((e & f) ^ (~e & g)) + u32(sha256._k[i]) + w[i] + ((e & f) ^ (~e & g)) + u32(_k[i]) + w[i] t2 := ((bits.rotate_left_32(a, -2)) ^ (bits.rotate_left_32(a, -13)) ^ (bits.rotate_left_32(a, -22))) + ((a & b) ^ (a & c) ^ (b & c)) diff --git a/vlib/crypto/sha3/sha3.v b/vlib/crypto/sha3/sha3.v index efafec82180e35..4dff8f58863a3e 100644 --- a/vlib/crypto/sha3/sha3.v +++ b/vlib/crypto/sha3/sha3.v @@ -41,32 +41,32 @@ const xof_pad = u8(0x1f) // new512 initializes the digest structure for a sha3 512 bit hash pub fn new512() !&Digest { - return new_digest(sha3.rate_512, sha3.size_512)! + return new_digest(rate_512, size_512)! } // new384 initializes the digest structure for a sha3 384 bit hash pub fn new384() !&Digest { - return new_digest(sha3.rate_384, sha3.size_384)! + return new_digest(rate_384, size_384)! } // new256 initializes the digest structure for a sha3 256 bit hash pub fn new256() !&Digest { - return new_digest(sha3.rate_256, sha3.size_256)! + return new_digest(rate_256, size_256)! } // new224 initializes the digest structure for a sha3 224 bit hash pub fn new224() !&Digest { - return new_digest(sha3.rate_224, sha3.size_224)! + return new_digest(rate_224, size_224)! } // new256_xof initializes the digest structure for a sha3 256 bit extended output function pub fn new256xof(output_len int) !&Digest { - return new_xof_digest(sha3.xof_rate_256, output_len)! + return new_xof_digest(xof_rate_256, output_len)! 
} // new128_xof initializes the digest structure for a sha3 128 bit extended output function pub fn new128xof(output_len int) !&Digest { - return new_xof_digest(sha3.xof_rate_128, output_len)! + return new_xof_digest(xof_rate_128, output_len)! } struct HashSizeError { @@ -75,7 +75,7 @@ struct HashSizeError { } fn (err HashSizeError) msg() string { - return 'Hash size ${err.size} must be ${sha3.size_224}, ${sha3.size_256}, ${sha3.size_384}, or ${sha3.size_512}' + return 'Hash size ${err.size} must be ${size_224}, ${size_256}, ${size_384}, or ${size_512}' } struct AbsorptionRateError { @@ -94,7 +94,7 @@ struct XOFRateError { } fn (err XOFRateError) msg() string { - return 'Extended output rate ${err.rate} must be ${sha3.xof_rate_128} or ${sha3.xof_rate_256}' + return 'Extended output rate ${err.rate} must be ${xof_rate_128} or ${xof_rate_256}' } struct XOFSizeError { @@ -125,32 +125,32 @@ mut: // Legal values are 224, 256, 384, and 512. pub fn new_digest(absorption_rate int, hash_size int) !&Digest { match hash_size { - sha3.size_224 { - if absorption_rate != sha3.rate_224 { + size_224 { + if absorption_rate != rate_224 { return AbsorptionRateError{ rate: absorption_rate size: hash_size } } } - sha3.size_256 { - if absorption_rate != sha3.rate_256 { + size_256 { + if absorption_rate != rate_256 { return AbsorptionRateError{ rate: absorption_rate size: hash_size } } } - sha3.size_384 { - if absorption_rate != sha3.rate_384 { + size_384 { + if absorption_rate != rate_384 { return AbsorptionRateError{ rate: absorption_rate size: hash_size } } } - sha3.size_512 { - if absorption_rate != sha3.rate_512 { + size_512 { + if absorption_rate != rate_512 { return AbsorptionRateError{ rate: absorption_rate size: hash_size @@ -166,7 +166,7 @@ pub fn new_digest(absorption_rate int, hash_size int) !&Digest { d := Digest{ rate: absorption_rate - suffix: sha3.hash_pad + suffix: hash_pad output_len: hash_size s: State{} } @@ -185,7 +185,7 @@ pub fn new_digest(absorption_rate int, 
hash_size int) !&Digest { // Legal values are positive integers. pub fn new_xof_digest(absorption_rate int, hash_size int) !&Digest { match absorption_rate { - sha3.xof_rate_128, sha3.xof_rate_256 { + xof_rate_128, xof_rate_256 { if hash_size < 1 { return XOFSizeError{ size: hash_size @@ -201,7 +201,7 @@ pub fn new_xof_digest(absorption_rate int, hash_size int) !&Digest { d := Digest{ rate: absorption_rate - suffix: sha3.xof_pad + suffix: xof_pad output_len: hash_size s: State{} } diff --git a/vlib/crypto/sha3/sha3_state_generic.v b/vlib/crypto/sha3/sha3_state_generic.v index 26a7bdf20d5ec3..67f02ed8b40458 100644 --- a/vlib/crypto/sha3/sha3_state_generic.v +++ b/vlib/crypto/sha3/sha3_state_generic.v @@ -212,7 +212,7 @@ const rho_offsets = [[int(0), 36, 3, 41, 18], [int(1), 44, 10, 45, 2], fn (mut s State) rho() { for x in 0 .. 5 { for y in 0 .. 5 { - s.a[x][y] = bits.rotate_left_64(s.a[x][y], sha3.rho_offsets[x][y]) + s.a[x][y] = bits.rotate_left_64(s.a[x][y], rho_offsets[x][y]) } } } @@ -298,7 +298,7 @@ const iota_round_constants = [u64(0x0000000000000001), 0x0000000000008082, 0x800 // to xor with lane 0, 0. @[inline] fn (mut s State) iota(round_index int) { - s.a[0][0] ^= sha3.iota_round_constants[round_index] + s.a[0][0] ^= iota_round_constants[round_index] } fn (s State) str() string { diff --git a/vlib/crypto/sha3/sha3_state_test.v b/vlib/crypto/sha3/sha3_state_test.v index c77762e2781326..3a8841fdb63694 100644 --- a/vlib/crypto/sha3/sha3_state_test.v +++ b/vlib/crypto/sha3/sha3_state_test.v @@ -135,7 +135,7 @@ fn test_2_x_24_rounds_on_zero() { } mut round := 0 - for rv in sha3.first_24 { + for rv in first_24 { s.theta() println('round ${round} verifying theta') @@ -187,13 +187,13 @@ fn test_2_x_24_rounds_on_zero() { // check the actual bytes in the state first_state_bytes := s.to_bytes() for i in 0 .. 
200 { - assert first_state_bytes[i] == sha3.first_state_as_bytes[i], 'examining state byte ${i} ${first_state_bytes[i]:02x} != ${sha3.first_state_as_bytes[i]:02x}' + assert first_state_bytes[i] == first_state_as_bytes[i], 'examining state byte ${i} ${first_state_bytes[i]:02x} != ${first_state_as_bytes[i]:02x}' } println('verifying using previous state') round = 0 - for rv in sha3.second_24 { + for rv in second_24 { s.theta() println('round ${round} verifying theta') @@ -245,7 +245,7 @@ fn test_2_x_24_rounds_on_zero() { // check the actual bytes in the state second_state_bytes := s.to_bytes() for i in 0 .. 200 { - assert second_state_bytes[i] == sha3.second_state_as_bytes[i], 'examining state byte ${i} ${second_state_bytes[i]:02x} != ${sha3.second_state_as_bytes[i]:02x}' + assert second_state_bytes[i] == second_state_as_bytes[i], 'examining state byte ${i} ${second_state_bytes[i]:02x} != ${second_state_as_bytes[i]:02x}' } } @@ -255,32 +255,32 @@ fn test_to_from_bytes() { // going from bytes to state as 5 x 5 u64 words to bytes // should give you the original byte array - s.from_bytes(sha3.first_state_as_bytes) + s.from_bytes(first_state_as_bytes) for x in 0 .. 5 { for y in 0 .. 5 { - assert s.a[x][y] == sha3.first_state_as_words[x][y], 'x ${x} y ${y} ${s.a[x][y]:016x} != ${sha3.first_state_as_words[x][y]:016x}' + assert s.a[x][y] == first_state_as_words[x][y], 'x ${x} y ${y} ${s.a[x][y]:016x} != ${first_state_as_words[x][y]:016x}' } } mut result_bytes := s.to_bytes() for i in 0 .. 200 { - assert result_bytes[i] == sha3.first_state_as_bytes[i], 'examining state byte ${i} ${result_bytes[i]:02x} != ${sha3.first_state_as_bytes[i]:02x}' + assert result_bytes[i] == first_state_as_bytes[i], 'examining state byte ${i} ${result_bytes[i]:02x} != ${first_state_as_bytes[i]:02x}' } - s.from_bytes(sha3.second_state_as_bytes) + s.from_bytes(second_state_as_bytes) for x in 0 .. 5 { for y in 0 .. 
5 { - assert s.a[x][y] == sha3.second_state_as_words[x][y], 'x ${x} y ${y} ${s.a[x][y]:016x} != ${sha3.second_state_as_words[x][y]:016x}' + assert s.a[x][y] == second_state_as_words[x][y], 'x ${x} y ${y} ${s.a[x][y]:016x} != ${second_state_as_words[x][y]:016x}' } } result_bytes = s.to_bytes() for i in 0 .. 200 { - assert result_bytes[i] == sha3.second_state_as_bytes[i], 'examining state byte ${i} ${result_bytes[i]:02x} != ${sha3.second_state_as_bytes[i]:02x}' + assert result_bytes[i] == second_state_as_bytes[i], 'examining state byte ${i} ${result_bytes[i]:02x} != ${second_state_as_bytes[i]:02x}' } } diff --git a/vlib/crypto/sha3/sha3_test.v b/vlib/crypto/sha3/sha3_test.v index caf030efc96f70..fcec038d5ba98b 100644 --- a/vlib/crypto/sha3/sha3_test.v +++ b/vlib/crypto/sha3/sha3_test.v @@ -20,16 +20,16 @@ fn test_0_length_hash() { input := []u8{} output_224 := sum224(input) - assert output_224 == sha3.empty_message_sha3_224 + assert output_224 == empty_message_sha3_224 output_256 := sum256(input) - assert output_256 == sha3.empty_message_sha3_256 + assert output_256 == empty_message_sha3_256 output_384 := sum384(input) - assert output_384 == sha3.empty_message_sha3_384 + assert output_384 == empty_message_sha3_384 output_512 := sum512(input) - assert output_512 == sha3.empty_message_sha3_512 + assert output_512 == empty_message_sha3_512 } const input_200 = [u8(0xa3), 0xa3, 0xa3, 0xa3, 0xa3, 0xa3, 0xa3, 0xa3, 0xa3, 0xa3, 0xa3, 0xa3, @@ -66,17 +66,17 @@ const test_200_message_sha3_512 = [u8(0xE7), 0x6D, 0xFA, 0xD2, 0x20, 0x84, 0xA8, 0x54, 0x5A, 0x1C, 0xE8, 0xBA, 0x00] fn test_200_length_hash() { - output_224 := sum224(sha3.input_200) - assert output_224 == sha3.test_200_message_sha3_224 + output_224 := sum224(input_200) + assert output_224 == test_200_message_sha3_224 - output_256 := sum256(sha3.input_200) - assert output_256 == sha3.test_200_message_sha3_256 + output_256 := sum256(input_200) + assert output_256 == test_200_message_sha3_256 - output_384 := 
sum384(sha3.input_200) - assert output_384 == sha3.test_200_message_sha3_384 + output_384 := sum384(input_200) + assert output_384 == test_200_message_sha3_384 - output_512 := sum512(sha3.input_200) - assert output_512 == sha3.test_200_message_sha3_512 + output_512 := sum512(input_200) + assert output_512 == test_200_message_sha3_512 } const empty_message_shake128 = [u8(0x7F), 0x9C, 0x2B, 0xA4, 0xE8, 0x8F, 0x82, 0x7D, 0x61, 0x60, @@ -151,10 +151,10 @@ fn test_0_length_xof() { input := []u8{} output_128 := shake128(input, 512) - assert output_128 == sha3.empty_message_shake128 + assert output_128 == empty_message_shake128 output_256 := shake256(input, 512) - assert output_256 == sha3.empty_message_shake256 + assert output_256 == empty_message_shake256 } const test_200_message_shake128 = [u8(0x13), 0x1A, 0xB8, 0xD2, 0xB5, 0x94, 0x94, 0x6B, 0x9C, 0x81, @@ -228,9 +228,9 @@ const test_200_message_shake256 = [u8(0xCD), 0x8A, 0x92, 0x0E, 0xD1, 0x41, 0xAA, fn test_200_length_xof() { input := []u8{} - output_128 := shake128(sha3.input_200, 512) - assert output_128 == sha3.test_200_message_shake128 + output_128 := shake128(input_200, 512) + assert output_128 == test_200_message_shake128 - output_256 := shake256(sha3.input_200, 512) - assert output_256 == sha3.test_200_message_shake256 + output_256 := shake256(input_200, 512) + assert output_256 == test_200_message_shake256 } diff --git a/vlib/crypto/sha512/sha512.v b/vlib/crypto/sha512/sha512.v index 045b633b4a9a10..c6764b7cf1eeae 100644 --- a/vlib/crypto/sha512/sha512.v +++ b/vlib/crypto/sha512/sha512.v @@ -80,7 +80,7 @@ pub fn (mut d Digest) free() { fn (mut d Digest) init() { d.h = []u64{len: (8)} - d.x = []u8{len: sha512.chunk} + d.x = []u8{len: chunk} d.reset() } @@ -88,44 +88,44 @@ fn (mut d Digest) init() { pub fn (mut d Digest) reset() { match d.function { .sha384 { - d.h[0] = sha512.init0_384 - d.h[1] = sha512.init1_384 - d.h[2] = sha512.init2_384 - d.h[3] = sha512.init3_384 - d.h[4] = sha512.init4_384 - d.h[5] = 
sha512.init5_384 - d.h[6] = sha512.init6_384 - d.h[7] = sha512.init7_384 + d.h[0] = init0_384 + d.h[1] = init1_384 + d.h[2] = init2_384 + d.h[3] = init3_384 + d.h[4] = init4_384 + d.h[5] = init5_384 + d.h[6] = init6_384 + d.h[7] = init7_384 } .sha512_224 { - d.h[0] = sha512.init0_224 - d.h[1] = sha512.init1_224 - d.h[2] = sha512.init2_224 - d.h[3] = sha512.init3_224 - d.h[4] = sha512.init4_224 - d.h[5] = sha512.init5_224 - d.h[6] = sha512.init6_224 - d.h[7] = sha512.init7_224 + d.h[0] = init0_224 + d.h[1] = init1_224 + d.h[2] = init2_224 + d.h[3] = init3_224 + d.h[4] = init4_224 + d.h[5] = init5_224 + d.h[6] = init6_224 + d.h[7] = init7_224 } .sha512_256 { - d.h[0] = sha512.init0_256 - d.h[1] = sha512.init1_256 - d.h[2] = sha512.init2_256 - d.h[3] = sha512.init3_256 - d.h[4] = sha512.init4_256 - d.h[5] = sha512.init5_256 - d.h[6] = sha512.init6_256 - d.h[7] = sha512.init7_256 + d.h[0] = init0_256 + d.h[1] = init1_256 + d.h[2] = init2_256 + d.h[3] = init3_256 + d.h[4] = init4_256 + d.h[5] = init5_256 + d.h[6] = init6_256 + d.h[7] = init7_256 } else { - d.h[0] = sha512.init0 - d.h[1] = sha512.init1 - d.h[2] = sha512.init2 - d.h[3] = sha512.init3 - d.h[4] = sha512.init4 - d.h[5] = sha512.init5 - d.h[6] = sha512.init6 - d.h[7] = sha512.init7 + d.h[0] = init0 + d.h[1] = init1 + d.h[2] = init2 + d.h[3] = init3 + d.h[4] = init4 + d.h[5] = init5 + d.h[6] = init6 + d.h[7] = init7 } } d.nx = 0 @@ -178,7 +178,7 @@ pub fn (mut d Digest) write(p_ []u8) !int { if d.nx > 0 { n := copy(mut d.x[d.nx..], p) d.nx += n - if d.nx == sha512.chunk { + if d.nx == chunk { block(mut d, d.x) d.nx = 0 } @@ -188,8 +188,8 @@ pub fn (mut d Digest) write(p_ []u8) !int { p = p[n..] 
} } - if p.len >= sha512.chunk { - n := p.len & ~(sha512.chunk - 1) + if p.len >= chunk { + n := p.len & ~(chunk - 1) block(mut d, p[..n]) if n >= p.len { p = [] @@ -212,17 +212,17 @@ pub fn (d &Digest) sum(b_in []u8) []u8 { mut b_out := b_in.clone() match d0.function { .sha384 { - for b in hash[..sha512.size384] { + for b in hash[..size384] { b_out << b } } .sha512_224 { - for b in hash[..sha512.size224] { + for b in hash[..size224] { b_out << b } } .sha512_256 { - for b in hash[..sha512.size256] { + for b in hash[..size256] { b_out << b } } @@ -255,7 +255,7 @@ fn (mut d Digest) checksum_internal() []u8 { if d.nx != 0 { panic('d.nx != 0') } - mut digest := []u8{len: sha512.size} + mut digest := []u8{len: size} binary.big_endian_put_u64(mut digest, d.h[0]) binary.big_endian_put_u64(mut digest[8..], d.h[1]) binary.big_endian_put_u64(mut digest[16..], d.h[2]) @@ -277,13 +277,13 @@ pub fn (mut d Digest) checksum() []u8 { out := d.checksum_internal() match d.function { .sha384 { - return out[0..sha512.size384] + return out[0..size384] } .sha512_224 { - return out[0..sha512.size224] + return out[0..size224] } .sha512_256 { - return out[0..sha512.size256] + return out[0..size256] } else { return out @@ -303,8 +303,8 @@ pub fn sum384(data []u8) []u8 { mut d := new_digest(.sha384) d.write(data) or { panic(err) } sum := d.checksum_internal() - mut sum384 := []u8{len: sha512.size384} - copy(mut sum384, sum[..sha512.size384]) + mut sum384 := []u8{len: size384} + copy(mut sum384, sum[..size384]) return sum384 } @@ -313,8 +313,8 @@ pub fn sum512_224(data []u8) []u8 { mut d := new_digest(.sha512_224) d.write(data) or { panic(err) } sum := d.checksum_internal() - mut sum224 := []u8{len: sha512.size224} - copy(mut sum224, sum[..sha512.size224]) + mut sum224 := []u8{len: size224} + copy(mut sum224, sum[..size224]) return sum224 } @@ -323,8 +323,8 @@ pub fn sum512_256(data []u8) []u8 { mut d := new_digest(.sha512_256) d.write(data) or { panic(err) } sum := d.checksum_internal() - 
mut sum256 := []u8{len: sha512.size256} - copy(mut sum256, sum[..sha512.size256]) + mut sum256 := []u8{len: size256} + copy(mut sum256, sum[..size256]) return sum256 } @@ -337,16 +337,16 @@ fn block(mut dig Digest, p []u8) { // size returns the size of the checksum in bytes. pub fn (d &Digest) size() int { match d.function { - .sha512_224 { return sha512.size224 } - .sha512_256 { return sha512.size256 } - .sha384 { return sha512.size384 } - else { return sha512.size } + .sha512_224 { return size224 } + .sha512_256 { return size256 } + .sha384 { return size384 } + else { return size } } } // block_size returns the block size of the checksum in bytes. pub fn (d &Digest) block_size() int { - return sha512.block_size + return block_size } // hexhash returns a hexadecimal SHA512 hash sum `string` of `s`. diff --git a/vlib/datatypes/bloom_filter.v b/vlib/datatypes/bloom_filter.v index e7590f2ae5631b..bbe30165275c68 100644 --- a/vlib/datatypes/bloom_filter.v +++ b/vlib/datatypes/bloom_filter.v @@ -51,8 +51,8 @@ pub fn new_bloom_filter[T](hash_func fn (T) u32, table_size int, num_functions i if table_size <= 0 { return error('table_size should great that 0') } - if num_functions < 1 || num_functions > datatypes.salts.len { - return error('num_functions should between 1~${datatypes.salts.len}') + if num_functions < 1 || num_functions > salts.len { + return error('num_functions should between 1~${salts.len}') } return &BloomFilter[T]{ @@ -68,7 +68,7 @@ pub fn (mut b BloomFilter[T]) add(element T) { hash := b.hash_func(element) for i in 0 .. b.num_functions { - subhash := hash ^ datatypes.salts[i] + subhash := hash ^ salts[i] index := int(subhash % u32(b.table_size)) bb := u8((1 << (index % 8))) b.table[index / 8] |= bb @@ -79,7 +79,7 @@ pub fn (mut b BloomFilter[T]) add(element T) { pub fn (b &BloomFilter[T]) exists(element T) bool { hash := b.hash_func(element) for i in 0 .. 
b.num_functions { - subhash := hash ^ datatypes.salts[i] + subhash := hash ^ salts[i] index := int(subhash % u32(b.table_size)) bb := b.table[index / 8] bit := 1 << (index % 8) diff --git a/vlib/db/mysql/stmt.c.v b/vlib/db/mysql/stmt.c.v index e293982f87f1bc..abce4840558c9a 100644 --- a/vlib/db/mysql/stmt.c.v +++ b/vlib/db/mysql/stmt.c.v @@ -182,73 +182,73 @@ fn (stmt Stmt) get_field_count() u16 { // bind_bool binds a single boolean value to the statement `stmt` pub fn (mut stmt Stmt) bind_bool(b &bool) { - stmt.bind(mysql.mysql_type_tiny, b, 0) + stmt.bind(mysql_type_tiny, b, 0) } // bind_byte binds a single byte value to the statement `stmt` pub fn (mut stmt Stmt) bind_byte(b &u8) { - stmt.bind(mysql.mysql_type_tiny, b, 0) + stmt.bind(mysql_type_tiny, b, 0) } // bind_u8 binds a single u8 value to the statement `stmt` pub fn (mut stmt Stmt) bind_u8(b &u8) { - stmt.bind(mysql.mysql_type_tiny, b, 0) + stmt.bind(mysql_type_tiny, b, 0) } // bind_i8 binds a single i8 value to the statement `stmt` pub fn (mut stmt Stmt) bind_i8(b &i8) { - stmt.bind(mysql.mysql_type_tiny, b, 0) + stmt.bind(mysql_type_tiny, b, 0) } // bind_i16 binds a single i16 value to the statement `stmt` pub fn (mut stmt Stmt) bind_i16(b &i16) { - stmt.bind(mysql.mysql_type_short, b, 0) + stmt.bind(mysql_type_short, b, 0) } // bind_u16 binds a single u16 value to the statement `stmt` pub fn (mut stmt Stmt) bind_u16(b &u16) { - stmt.bind(mysql.mysql_type_short, b, 0) + stmt.bind(mysql_type_short, b, 0) } // bind_int binds a single int value to the statement `stmt` pub fn (mut stmt Stmt) bind_int(b &int) { - stmt.bind(mysql.mysql_type_long, b, 0) + stmt.bind(mysql_type_long, b, 0) } // bind_u32 binds a single u32 value to the statement `stmt` pub fn (mut stmt Stmt) bind_u32(b &u32) { - stmt.bind(mysql.mysql_type_long, b, 0) + stmt.bind(mysql_type_long, b, 0) } // bind_i64 binds a single i64 value to the statement `stmt` pub fn (mut stmt Stmt) bind_i64(b &i64) { - stmt.bind(mysql.mysql_type_longlong, b, 
0) + stmt.bind(mysql_type_longlong, b, 0) } // bind_u64 binds a single u64 value to the statement `stmt` pub fn (mut stmt Stmt) bind_u64(b &u64) { - stmt.bind(mysql.mysql_type_longlong, b, 0) + stmt.bind(mysql_type_longlong, b, 0) } // bind_f32 binds a single f32 value to the statement `stmt` pub fn (mut stmt Stmt) bind_f32(b &f32) { - stmt.bind(mysql.mysql_type_float, b, 0) + stmt.bind(mysql_type_float, b, 0) } // bind_f64 binds a single f64 value to the statement `stmt` pub fn (mut stmt Stmt) bind_f64(b &f64) { - stmt.bind(mysql.mysql_type_double, b, 0) + stmt.bind(mysql_type_double, b, 0) } // bind_text binds a single string value to the statement `stmt` pub fn (mut stmt Stmt) bind_text(b string) { - stmt.bind(mysql.mysql_type_string, b.str, u32(b.len)) + stmt.bind(mysql_type_string, b.str, u32(b.len)) } // bind_null binds a single NULL value to the statement `stmt` pub fn (mut stmt Stmt) bind_null() { stmt.binds << C.MYSQL_BIND{ - buffer_type: mysql.mysql_type_null + buffer_type: mysql_type_null length: 0 } } diff --git a/vlib/db/sqlite/sqlite.c.v b/vlib/db/sqlite/sqlite.c.v index fc3b7525406801..a75400e5daac3e 100644 --- a/vlib/db/sqlite/sqlite.c.v +++ b/vlib/db/sqlite/sqlite.c.v @@ -188,7 +188,7 @@ pub fn (db &DB) q_int(query string) !int { } C.sqlite3_prepare_v2(db.conn, &char(query.str), query.len, &stmt, 0) code := C.sqlite3_step(stmt) - if code != sqlite.sqlite_row { + if code != sqlite_row { return db.error_message(code, query) } @@ -204,7 +204,7 @@ pub fn (db &DB) q_string(query string) !string { } C.sqlite3_prepare_v2(db.conn, &char(query.str), query.len, &stmt, 0) code := C.sqlite3_step(stmt) - if code != sqlite.sqlite_row { + if code != sqlite_row { return db.error_message(code, query) } @@ -220,7 +220,7 @@ pub fn (db &DB) exec(query string) ![]Row { C.sqlite3_finalize(stmt) } mut code := C.sqlite3_prepare_v2(db.conn, &char(query.str), query.len, &stmt, 0) - if code != sqlite.sqlite_ok { + if code != sqlite_ok { return db.error_message(code, query) } 
@@ -259,7 +259,7 @@ pub fn (db &DB) exec_one(query string) !Row { if rows.len == 0 { return &SQLError{ msg: 'No rows' - code: sqlite.sqlite_done + code: sqlite_done } } res := rows[0] @@ -314,7 +314,7 @@ pub fn (db &DB) exec_param_many(query string, params []string) ![]Row { mut rows := []Row{} for { res = C.sqlite3_step(stmt) - if res != sqlite.sqlite_row { + if res != sqlite_row { if rows.len == 0 && is_error(res) { return db.error_message(res, query) } diff --git a/vlib/dl/dl.v b/vlib/dl/dl.v index fadc63127308e5..705fb2178175af 100644 --- a/vlib/dl/dl.v +++ b/vlib/dl/dl.v @@ -23,7 +23,7 @@ pub fn get_shared_library_extension() string { // shared libraries. @[inline] pub fn get_libname(libname string) string { - return '${libname}${dl.dl_ext}' + return '${libname}${dl_ext}' } // open_opt tries to load a given dynamic shared object. diff --git a/vlib/dl/loader/loader.v b/vlib/dl/loader/loader.v index 733d080bfbb978..57217043a5e8de 100644 --- a/vlib/dl/loader/loader.v +++ b/vlib/dl/loader/loader.v @@ -28,7 +28,7 @@ __global ( fn register_dl_loader(dl_loader &DynamicLibLoader) ! { if dl_loader.key in registered_dl_loaders { - return loader.dl_register_issue_err + return dl_register_issue_err } registered_dl_loaders[dl_loader.key] = dl_loader } @@ -77,7 +77,7 @@ fn new_dynamic_lib_loader(conf DynamicLibLoaderConfig) !&DynamicLibLoader { paths << conf.paths if paths.len == 0 { - return loader.dl_no_path_issue_err + return dl_no_path_issue_err } mut dl_loader := &DynamicLibLoader{ @@ -112,7 +112,7 @@ pub fn (mut dl_loader DynamicLibLoader) open() !voidptr { } } - return loader.dl_open_issue_err + return dl_open_issue_err } // close closes the dynamic library. @@ -124,7 +124,7 @@ pub fn (mut dl_loader DynamicLibLoader) close() ! { } } - return loader.dl_close_issue_err + return dl_close_issue_err } // get_sym gets a symbol from the dynamic library. @@ -140,7 +140,7 @@ pub fn (mut dl_loader DynamicLibLoader) get_sym(name string) !voidptr { } dl_loader.close()! 
- return loader.dl_sym_issue_err + return dl_sym_issue_err } // unregister unregisters the DynamicLibLoader. diff --git a/vlib/dlmalloc/dlmalloc.v b/vlib/dlmalloc/dlmalloc.v index 637ececa600aa0..7ea557bd5209c4 100644 --- a/vlib/dlmalloc/dlmalloc.v +++ b/vlib/dlmalloc/dlmalloc.v @@ -63,7 +63,7 @@ fn chunk_overhead() usize { @[inline] fn min_large_size() usize { - return 1 << dlmalloc.tree_bin_shift + return 1 << tree_bin_shift } @[inline] @@ -140,15 +140,15 @@ fn is_aligned(a usize) bool { } fn is_small(s usize) bool { - return s >> dlmalloc.small_bin_shift < dlmalloc.n_small_bins + return s >> small_bin_shift < n_small_bins } fn small_index2size(idx u32) usize { - return usize(idx) << dlmalloc.small_bin_shift + return usize(idx) << small_bin_shift } fn small_index(size usize) u32 { - return u32(size >> dlmalloc.small_bin_shift) + return u32(size >> small_bin_shift) } fn align_up(a usize, alignment usize) usize { @@ -169,10 +169,10 @@ fn least_bit(x u32) u32 { fn leftshift_for_tree_index(x u32) u32 { y := usize(x) - if y == dlmalloc.n_tree_bins - 1 { + if y == n_tree_bins - 1 { return 0 } else { - return u32(sizeof(usize) * 8 - 1 - ((y >> 1) + dlmalloc.tree_bin_shift - 2)) + return u32(sizeof(usize) * 8 - 1 - ((y >> 1) + tree_bin_shift - 2)) } } @@ -242,8 +242,8 @@ pub fn new(system_allocator Allocator) Dlmalloc { return Dlmalloc{ smallmap: 0 treemap: 0 - smallbins: unsafe { [(dlmalloc.n_small_bins + 1) * 2]&Chunk{} } - treebins: unsafe { [dlmalloc.n_tree_bins]&TreeChunk{} } + smallbins: unsafe { [(n_small_bins + 1) * 2]&Chunk{} } + treebins: unsafe { [n_tree_bins]&TreeChunk{} } dvsize: 0 topsize: 0 dv: unsafe { nil } @@ -293,15 +293,15 @@ const inuse = pinuse | cinuse const flag_bits = pinuse | cinuse | flag4 fn fencepost_head() usize { - return dlmalloc.inuse | sizeof(usize) + return inuse | sizeof(usize) } fn (c &Chunk) size() usize { - return c.head & ~dlmalloc.flag_bits + return c.head & ~flag_bits } fn (c &Chunk) mmapped() bool { - return c.head & 
dlmalloc.inuse == 0 + return c.head & inuse == 0 } fn (c &Chunk) next() &Chunk { @@ -317,39 +317,39 @@ fn (c &Chunk) prev() &Chunk { } fn (c &Chunk) cinuse() bool { - return c.head & dlmalloc.cinuse != 0 + return c.head & cinuse != 0 } fn (c &Chunk) pinuse() bool { - return c.head & dlmalloc.pinuse != 0 + return c.head & pinuse != 0 } fn (mut c Chunk) clear_pinuse() { - c.head &= ~dlmalloc.pinuse + c.head &= ~pinuse } fn (c &Chunk) inuse() bool { - return c.head & dlmalloc.inuse != dlmalloc.pinuse + return c.head & inuse != pinuse } fn (mut c Chunk) set_inuse(size usize) { - c.head = (c.head & dlmalloc.pinuse) | size | dlmalloc.cinuse + c.head = (c.head & pinuse) | size | cinuse mut next := c.plus_offset(size) - next.head |= dlmalloc.pinuse + next.head |= pinuse } fn (mut c Chunk) set_inuse_and_pinuse(size usize) { - c.head = dlmalloc.pinuse | size | dlmalloc.cinuse + c.head = pinuse | size | cinuse mut next := c.plus_offset(size) - next.head |= dlmalloc.pinuse + next.head |= pinuse } fn (mut c Chunk) set_size_and_pinuse_of_inuse_chunk(size usize) { - c.head = size | dlmalloc.pinuse | dlmalloc.cinuse + c.head = size | pinuse | cinuse } fn (mut c Chunk) set_size_and_pinuse_of_free_chunk(size usize) { - c.head = size | dlmalloc.pinuse + c.head = size | pinuse c.set_foot(size) } @@ -395,7 +395,7 @@ fn (tree &TreeChunk) chunk() &Chunk { } fn (tree &TreeChunk) size(treemap u32) usize { - return tree.chunk.head & ~dlmalloc.flag_bits + return tree.chunk.head & ~flag_bits } @[unsafe] @@ -415,7 +415,7 @@ fn (tree &TreeChunk) prev() &TreeChunk { const extern = 1 << 0 fn (seg &Segment) is_extern() bool { - return seg.flags & dlmalloc.extern != 0 + return seg.flags & extern != 0 } fn (seg &Segment) can_release_part(sys_alloc &Allocator) bool { @@ -453,14 +453,14 @@ fn (mut dl Dlmalloc) treebin_at(idx u32) &&TreeChunk { } fn (dl &Dlmalloc) compute_tree_index(size usize) u32 { - x := size >> dlmalloc.tree_bin_shift + x := size >> tree_bin_shift if x == 0 { return 0 } else if x > 
0xffff { - return dlmalloc.n_tree_bins - 1 + return n_tree_bins - 1 } else { k := sizeof(usize) * 8 - 1 - usize_leading_zeros(x) - return u32((k << 1) + ((size >> (k + dlmalloc.tree_bin_shift - 1)) & 1)) + return u32((k << 1) + ((size >> (k + tree_bin_shift - 1)) & 1)) } } @@ -615,7 +615,7 @@ pub fn (mut dl Dlmalloc) free_(mem voidptr) { p = prev if voidptr(p) != voidptr(dl.dv) { dl.unlink_chunk(p, prevsize) - } else if (next.head & dlmalloc.inuse) == dlmalloc.inuse { + } else if (next.head & inuse) == inuse { dl.dvsize = psize p.set_free_with_pinuse(psize, next) @@ -630,7 +630,7 @@ pub fn (mut dl Dlmalloc) free_(mem voidptr) { tsize := dl.topsize dl.top = p - p.head = tsize | dlmalloc.pinuse + p.head = tsize | pinuse if voidptr(p) == voidptr(dl.dv) { dl.dv = nil dl.dvsize = 0 @@ -765,10 +765,10 @@ fn (mut dl Dlmalloc) release_unused_segments() usize { pred = sp sp = next } - dl.release_checks = if nsegs > dlmalloc.max_release_check_rate { + dl.release_checks = if nsegs > max_release_check_rate { nsegs } else { - dlmalloc.max_release_check_rate + max_release_check_rate } return released } @@ -1024,7 +1024,7 @@ fn (mut dl Dlmalloc) malloc_real(size usize) voidptr { mut p := dl.top dl.top = p.plus_offset(nb) mut r := dl.top - r.head = rsize | dlmalloc.pinuse + r.head = rsize | pinuse p.set_size_and_pinuse_of_inuse_chunk(nb) ret := p.to_mem() @@ -1038,7 +1038,7 @@ fn (mut dl Dlmalloc) malloc_real(size usize) voidptr { @[unsafe] fn (mut dl Dlmalloc) init_bins() { unsafe { - for i in 0 .. dlmalloc.n_small_bins { + for i in 0 .. 
n_small_bins { mut bin := dl.smallbin_at(i) bin.prev = bin bin.next = bin @@ -1056,7 +1056,7 @@ fn (mut dl Dlmalloc) init_top(ptr &Chunk, size_ usize) { dl.top = p dl.topsize = size // C.VALGRIND_MAKE_MEM_UNDEFINED(p.plus_offset(sizeof(usize)),sizeof(usize)) - p.head = size | dlmalloc.pinuse + p.head = size | pinuse // C.VALGRIND_MAKE_MEM_UNDEFINED(p.plus_offset(size + sizeof(usize)),sizeof(usize)) p.plus_offset(size).head = top_foot_size() dl.trim_check = u32(default_trim_threshold()) @@ -1088,7 +1088,7 @@ fn (mut dl Dlmalloc) sys_alloc(size usize) voidptr { dl.seg.base = tbase dl.seg.size = tsize dl.seg.flags = flags - dl.release_checks = dlmalloc.max_release_check_rate + dl.release_checks = max_release_check_rate dl.init_bins() tsize_ := tsize - top_foot_size() dl.init_top(&Chunk(tbase), tsize_) @@ -1131,7 +1131,7 @@ fn (mut dl Dlmalloc) sys_alloc(size usize) voidptr { mut p := dl.top dl.top = p.plus_offset(size) mut r := dl.top - r.head = rsize | dlmalloc.pinuse + r.head = rsize | pinuse p.set_size_and_pinuse_of_inuse_chunk(size) ret := p.to_mem() @@ -1270,7 +1270,7 @@ fn (mut dl Dlmalloc) prepend_alloc(newbase voidptr, oldbase voidptr, size usize) dl.topsize += qsize tsize := dl.topsize dl.top = q - q.head = tsize | dlmalloc.pinuse + q.head = tsize | pinuse } else if voidptr(oldfirst) == voidptr(dl.dv) { dl.dvsize += qsize dsize := dl.dvsize @@ -1475,7 +1475,7 @@ fn (mut dl Dlmalloc) try_realloc_chunk(p_ &Chunk, nb usize, can_move bool) &Chun newtopsize := newsize - nb mut newtop := p.plus_offset(nb) p.set_inuse(nb) - newtop.head = newtopsize | dlmalloc.pinuse + newtop.head = newtopsize | pinuse dl.top = newtop dl.topsize = newtopsize return p @@ -1589,7 +1589,7 @@ fn (mut dl Dlmalloc) dispose_chunk(p_ &Chunk, psize_ usize) { p = prev if voidptr(p) != voidptr(dl.dv) { dl.unlink_chunk(p, prevsize) - } else if next.head & dlmalloc.inuse == dlmalloc.inuse { + } else if next.head & inuse == inuse { dl.dvsize = psize p.set_free_with_pinuse(psize, next) return @@ 
-1601,7 +1601,7 @@ fn (mut dl Dlmalloc) dispose_chunk(p_ &Chunk, psize_ usize) { dl.topsize += psize tsize := dl.topsize dl.top = p - p.head = tsize | dlmalloc.pinuse + p.head = tsize | pinuse if voidptr(p) == voidptr(dl.dv) { dl.dv = nil dl.dvsize = 0 diff --git a/vlib/encoding/base32/base32.v b/vlib/encoding/base32/base32.v index 40f4fdfc24bf29..e0caac8406c5b4 100644 --- a/vlib/encoding/base32/base32.v +++ b/vlib/encoding/base32/base32.v @@ -40,7 +40,7 @@ pub fn decode_to_string(src []u8) !string { // decode decodes a byte array `src` using Base32 // and returns the decoded bytes or a `corrupt_input_error_msg` error. pub fn decode(src []u8) ![]u8 { - mut e := new_encoding(base32.std_alphabet) + mut e := new_encoding(std_alphabet) return e.decode(src) } @@ -59,7 +59,7 @@ pub fn encode_to_string(src []u8) string { // encode encodes a byte array `src` using Base32 and returns the // encoded bytes. pub fn encode(src []u8) []u8 { - e := new_encoding(base32.std_alphabet) + e := new_encoding(std_alphabet) return e.encode(src) } @@ -78,7 +78,7 @@ pub fn (enc &Encoding) encode_string_to_string(src string) string { // new_std_encoding creates a standard Base32 `Encoding` as defined in // RFC 4648. pub fn new_std_encoding() Encoding { - return new_encoding_with_padding(base32.std_alphabet, base32.std_padding) + return new_encoding_with_padding(std_alphabet, std_padding) } // new_std_encoding creates a standard Base32 `Encoding` identical to @@ -88,13 +88,13 @@ pub fn new_std_encoding() Encoding { // be contained in the `Encoding`'s alphabet and must be a rune equal or // below '\xff'. pub fn new_std_encoding_with_padding(padding u8) Encoding { - return new_encoding_with_padding(base32.std_alphabet, padding) + return new_encoding_with_padding(std_alphabet, padding) } // new_encoding returns a Base32 `Encoding` with standard // `alphabet`s and standard padding. 
pub fn new_encoding(alphabet []u8) Encoding { - return new_encoding_with_padding(alphabet, base32.std_padding) + return new_encoding_with_padding(alphabet, std_padding) } // new_encoding_with_padding returns a Base32 `Encoding` with specified @@ -193,7 +193,7 @@ fn (enc &Encoding) encode_(src_ []u8, mut dst []u8) { // Pad the final quantum if src.len < 5 { - if enc.padding_char == base32.no_padding { + if enc.padding_char == no_padding { break } @@ -218,7 +218,7 @@ fn (enc &Encoding) encode_(src_ []u8, mut dst []u8) { } fn (enc &Encoding) encoded_len(n int) int { - if enc.padding_char == base32.no_padding { + if enc.padding_char == no_padding { return (n * 8 + 4) / 5 } return (n + 4) / 5 * 8 @@ -276,7 +276,7 @@ fn (enc &Encoding) decode_(src_ []u8, mut dst []u8) !(int, bool) { for j := 0; j < 8; { if src.len == 0 { - if enc.padding_char != base32.no_padding { + if enc.padding_char != no_padding { // We have reached the end and are missing padding // return n, false, corrupt_input_error(olen - src.len - j) return error(corrupt_input_error_msg(olen - src.len - j)) diff --git a/vlib/encoding/csv/csv_reader_random_access.v b/vlib/encoding/csv/csv_reader_random_access.v index 94919515c09909..668403bcc7b792 100644 --- a/vlib/encoding/csv/csv_reader_random_access.v +++ b/vlib/encoding/csv/csv_reader_random_access.v @@ -55,7 +55,7 @@ pub mut: end_index i64 = -1 end_line u8 = `\n` - end_line_len int = csv.endline_cr_len // size of the endline rune \n = 1, \r\n = 2 + end_line_len int = endline_cr_len // size of the endline rune \n = 1, \r\n = 2 separator u8 = `,` // comma is the default separator separator_len int = 1 // size of the separator rune quote u8 = `"` // double quote is the standard quote char @@ -91,7 +91,7 @@ pub: comment u8 = `#` // every line that start with the quote char is ignored default_cell string = '*' // return this string if out of the csv boundaries empty_cell string // return this string if empty cell - end_line_len int = csv.endline_cr_len // size 
of the endline rune + end_line_len int = endline_cr_len // size of the endline rune quote u8 = `"` // double quote is the standard quote char quote_remove bool // if true clear the cell from the quotes } @@ -115,7 +115,7 @@ pub fn csv_reader(cfg RandomAccessReaderConfig) !&RandomAccessReader { cr.end_index = cfg.end_index if cfg.scr_buf != 0 && cfg.scr_buf_len > 0 { - cr.mem_buf_type = csv.ram_csv // RAM buffer + cr.mem_buf_type = ram_csv // RAM buffer cr.mem_buf = cfg.scr_buf cr.mem_buf_size = cfg.scr_buf_len if cfg.end_index == -1 { @@ -136,7 +136,7 @@ pub fn csv_reader(cfg RandomAccessReaderConfig) !&RandomAccessReader { if !os.exists(cfg.file_path) { return error('ERROR: file ${cfg.file_path} not found!') } - cr.mem_buf_type = csv.file_csv // File buffer + cr.mem_buf_type = file_csv // File buffer // allocate the memory unsafe { cr.mem_buf = malloc(cfg.mem_buf_size) @@ -184,9 +184,9 @@ pub fn csv_reader(cfg RandomAccessReaderConfig) !&RandomAccessReader { // dispose_csv_reader release the resources used by the csv_reader pub fn (mut cr RandomAccessReader) dispose_csv_reader() { - if cr.mem_buf_type == csv.ram_csv { + if cr.mem_buf_type == ram_csv { // do nothing, ram buffer is static - } else if cr.mem_buf_type == csv.file_csv { + } else if cr.mem_buf_type == file_csv { // file close if cr.f.is_opened { cr.f.close() @@ -205,7 +205,7 @@ pub fn (mut cr RandomAccessReader) dispose_csv_reader() { fn (mut cr RandomAccessReader) fill_buffer(i i64) !i64 { // use ram - if cr.mem_buf_type == csv.ram_csv { + if cr.mem_buf_type == ram_csv { // do nothing, ram buffer are static for now cr.mem_buf_start = i cr.mem_buf_end = cr.mem_buf_size @@ -213,7 +213,7 @@ fn (mut cr RandomAccessReader) fill_buffer(i i64) !i64 { // println("fill_buffer RAM: ${i} read_bytes_count: ${read_bytes_count} mem_buf_start: ${cr.mem_buf_start} mem_buf_end: ${cr.mem_buf_end}") return i64(read_bytes_count) // use file - } else if cr.mem_buf_type == csv.file_csv { + } else if cr.mem_buf_type == 
file_csv { cr.start_index = i cr.f.seek(cr.start_index, .start)! // IMPORTANT: add 64 bit support in vlib!! @@ -240,7 +240,7 @@ pub fn (mut cr RandomAccessReader) map_csv() ! { mut quote_flag := false // true if we are parsing inside a quote // if File return to the start of the file - if cr.mem_buf_type == csv.file_csv { + if cr.mem_buf_type == file_csv { cr.f.seek(cr.start_index, .start)! } @@ -325,7 +325,7 @@ pub fn (mut cr RandomAccessReader) map_csv() ! { } // if File return to the start of the file - if cr.mem_buf_type == csv.file_csv { + if cr.mem_buf_type == file_csv { cr.f.seek(cr.start_index, .start)! } @@ -522,7 +522,7 @@ pub fn (mut cr RandomAccessReader) rows_count() !i64 { mut count := i64(0) mut i := i64(0) - if cr.mem_buf_type == csv.file_csv { + if cr.mem_buf_type == file_csv { cr.f.seek(cr.start_index, .start)! } unsafe { @@ -542,7 +542,7 @@ pub fn (mut cr RandomAccessReader) rows_count() !i64 { i += read_bytes_count } } - if cr.mem_buf_type == csv.file_csv { + if cr.mem_buf_type == file_csv { cr.f.seek(cr.start_index, .start)! } // println("rows_count Done!") diff --git a/vlib/encoding/html/escape.v b/vlib/encoding/html/escape.v index 422726a10d4c7c..16ecf51d34748b 100644 --- a/vlib/encoding/html/escape.v +++ b/vlib/encoding/html/escape.v @@ -28,9 +28,9 @@ const unescape_quote_seq = ['"', '"', ''', "'"] // support through `string` is robust enough to deal with these cases. 
pub fn escape(input string, config EscapeConfig) string { return if config.quote { - input.replace_each(html.escape_seq).replace_each(html.escape_quote_seq) + input.replace_each(escape_seq).replace_each(escape_quote_seq) } else { - input.replace_each(html.escape_seq) + input.replace_each(escape_seq) } } @@ -41,9 +41,9 @@ pub fn unescape(input string, config UnescapeConfig) string { return if config.all { unescape_all(input) } else if config.quote { - input.replace_each(html.unescape_seq).replace_each(html.unescape_quote_seq) + input.replace_each(unescape_seq).replace_each(unescape_quote_seq) } else { - input.replace_each(html.unescape_seq) + input.replace_each(unescape_seq) } } diff --git a/vlib/encoding/txtar/txtar.v b/vlib/encoding/txtar/txtar.v index 1a0d6114752f5d..3db8f162f6e799 100644 --- a/vlib/encoding/txtar/txtar.v +++ b/vlib/encoding/txtar/txtar.v @@ -58,7 +58,7 @@ fn find_file_marker(data string) (string, string, string) { if name != '' { return data[..i], name, after } - j := data[i..].index(txtar.nlm) or { return fix_nl(data), '', '' } + j := data[i..].index(nlm) or { return fix_nl(data), '', '' } i += j + 1 // positioned at start of new possible marker } return '', '', '' @@ -68,7 +68,7 @@ fn find_file_marker(data string) (string, string, string) { // If so, it returns the name from the line, and the data after the line. // Otherwise it returns name == "". fn is_marker(data string) (string, string) { - if !data.starts_with(txtar.mstart) { + if !data.starts_with(mstart) { return '', '' } mut ndata := data @@ -77,10 +77,10 @@ fn is_marker(data string) (string, string) { if i >= 0 { ndata, after = data[..i], data[i + 1..] 
} - if !(ndata.ends_with(txtar.mend) && ndata.len >= txtar.mstart.len + txtar.mend.len) { + if !(ndata.ends_with(mend) && ndata.len >= mstart.len + mend.len) { return '', '' } - name := ndata[txtar.mstart.len..ndata.len - txtar.mend.len].trim_space() + name := ndata[mstart.len..ndata.len - mend.len].trim_space() return name, after } diff --git a/vlib/encoding/utf8/east_asian/east_asian_width.v b/vlib/encoding/utf8/east_asian/east_asian_width.v index eb0468e6454a9b..21c6ba121a7185 100644 --- a/vlib/encoding/utf8/east_asian/east_asian_width.v +++ b/vlib/encoding/utf8/east_asian/east_asian_width.v @@ -34,10 +34,10 @@ pub fn display_width(s string, ambiguous_width int) int { // width_property_at returns the East Asian Width properties at string[index] pub fn east_asian_width_property_at(s string, index int) EastAsianWidthProperty { codepoint := utf8.get_uchar(s, index) - mut left, mut right := 0, east_asian.east_asian_width_data.len - 1 + mut left, mut right := 0, east_asian_width_data.len - 1 for left <= right { middle := left + ((right - left) / 2) - entry := east_asian.east_asian_width_data[middle] + entry := east_asian_width_data[middle] if codepoint < entry.point { right = middle - 1 continue diff --git a/vlib/encoding/utf8/utf8_tables.v b/vlib/encoding/utf8/utf8_tables.v index 12dc9eb7122831..6c982883bcbcf4 100644 --- a/vlib/encoding/utf8/utf8_tables.v +++ b/vlib/encoding/utf8/utf8_tables.v @@ -1332,7 +1332,7 @@ fn is_excluding_latin(table &RangeTable, r rune) bool { const linear_max = 18 fn is_16(ranges []Range16, r u16) bool { - if ranges.len <= utf8.linear_max && r <= utf8.max_latin_1 { + if ranges.len <= linear_max && r <= max_latin_1 { for range in ranges { if r < range.lo { return false @@ -1363,7 +1363,7 @@ fn is_16(ranges []Range16, r u16) bool { } fn is_32(ranges []Range32, r u32) bool { - if ranges.len <= utf8.linear_max && r <= utf8.max_latin_1 { + if ranges.len <= linear_max && r <= max_latin_1 { for range in ranges { if r < range.lo { return false 
diff --git a/vlib/encoding/utf8/utf8_util.v b/vlib/encoding/utf8/utf8_util.v index c02b02890c7956..d39e4b51c69ae1 100644 --- a/vlib/encoding/utf8/utf8_util.v +++ b/vlib/encoding/utf8/utf8_util.v @@ -176,7 +176,7 @@ pub fn is_number(r rune) bool { // is_uchar_punct return true if the input unicode is a western unicode punctuation pub fn is_uchar_punct(uchar int) bool { - return find_punct_in_table(uchar, utf8.unicode_punct_western) != 0 + return find_punct_in_table(uchar, unicode_punct_western) != 0 } // Global @@ -188,7 +188,7 @@ pub fn is_global_punct(s string, index int) bool { // is_uchar_global_punct return true if the input unicode is a global unicode punctuation pub fn is_uchar_global_punct(uchar int) bool { - return find_punct_in_table(uchar, utf8.unicode_punct) != 0 + return find_punct_in_table(uchar, unicode_punct) != 0 } // Private functions diff --git a/vlib/encoding/xml/entity.v b/vlib/encoding/xml/entity.v index 34ce49ced75c1d..a70dc2b3d5a1da 100644 --- a/vlib/encoding/xml/entity.v +++ b/vlib/encoding/xml/entity.v @@ -21,7 +21,7 @@ pub const default_entities_reverse = { @[params] pub struct EscapeConfig { pub: - reverse_entities map[string]string = xml.default_entities_reverse + reverse_entities map[string]string = default_entities_reverse } // escape_text replaces all entities in the given string with their respective @@ -40,7 +40,7 @@ pub fn escape_text(content string, config EscapeConfig) string { @[params] pub struct UnescapeConfig { pub: - entities map[string]string = xml.default_entities + entities map[string]string = default_entities } // unescape_text replaces all entities in the given string with their respective diff --git a/vlib/encoding/xml/parser.v b/vlib/encoding/xml/parser.v index 1a755dccd50743..1bfa19c4447afc 100644 --- a/vlib/encoding/xml/parser.v +++ b/vlib/encoding/xml/parser.v @@ -93,7 +93,7 @@ fn parse_attributes(attribute_contents string) !map[string]string { } fn parse_comment(mut reader io.Reader) !XMLComment { - mut 
comment_buffer := strings.new_builder(xml.default_string_builder_cap) + mut comment_buffer := strings.new_builder(default_string_builder_cap) mut local_buf := [u8(0)] for { @@ -128,7 +128,7 @@ enum CDATAParserState { } fn parse_cdata(mut reader io.Reader) !XMLCData { - mut contents_buf := strings.new_builder(xml.default_string_builder_cap) + mut contents_buf := strings.new_builder(default_string_builder_cap) mut state := CDATAParserState.normal mut local_buf := [u8(0)] @@ -176,7 +176,7 @@ fn parse_cdata(mut reader io.Reader) !XMLCData { fn parse_entity(contents string) !(DTDEntity, string) { // We find the nearest '>' to the start of the ENTITY entity_end := contents.index('>') or { return error('Entity declaration not closed.') } - entity_contents := contents[xml.entity_len..entity_end] + entity_contents := contents[entity_len..entity_end] name := entity_contents.trim_left(' \t\n').all_before(' ') if name == '' { @@ -195,7 +195,7 @@ fn parse_entity(contents string) !(DTDEntity, string) { fn parse_element(contents string) !(DTDElement, string) { // We find the nearest '>' to the start of the ELEMENT element_end := contents.index('>') or { return error('Element declaration not closed.') } - element_contents := contents[xml.element_len..element_end].trim_left(' \t\n') + element_contents := contents[element_len..element_end].trim_left(' \t\n') mut name_span := TextSpan{} @@ -248,7 +248,7 @@ fn parse_element(contents string) !(DTDElement, string) { fn parse_doctype(mut reader io.Reader) !DocumentType { // We may have more < in the doctype so keep count mut depth := 1 - mut doctype_buffer := strings.new_builder(xml.default_string_builder_cap) + mut doctype_buffer := strings.new_builder(default_string_builder_cap) mut local_buf := [u8(0)] for { ch := next_char(mut reader, mut local_buf)! 
@@ -309,13 +309,13 @@ fn parse_prolog(mut reader io.Reader) !(Prolog, u8) { `<` { break } - xml.byte_order_marking_first { + byte_order_marking_first { // UTF-8 BOM mut bom_buf := [u8(0), 0] if reader.read(mut bom_buf)! != 2 { return error('Invalid UTF-8 BOM.') } - if bom_buf != xml.byte_order_marking_bytes { + if bom_buf != byte_order_marking_bytes { return error('Invalid UTF-8 BOM.') } ch = next_char(mut reader, mut local_buf)! @@ -347,7 +347,7 @@ fn parse_prolog(mut reader io.Reader) !(Prolog, u8) { return error('Expecting a prolog starting with " DATA mut cdata_buf := []u8{len: 4} if reader.read(mut cdata_buf)! != 4 { return error('Invalid XML. Incomplete CDATA declaration.') } - if cdata_buf != xml.data_chars { + if cdata_buf != data_chars { return error('Invalid XML. Expected "CDATA" after "" tag mut skip := []u8{len: 6} @@ -105,7 +105,7 @@ fn test_single_element_parsing() ! { mut count := 0 - for count < xml.xml_elements.len { + for count < xml_elements.len { match ch { `<` { next_ch := next_char(mut reader, mut local_buf)! @@ -113,7 +113,7 @@ fn test_single_element_parsing() ! { `/` {} else { parsed_element := parse_single_node(next_ch, mut reader)! - assert xml.xml_elements[count] == parsed_element + assert xml_elements[count] == parsed_element count++ } } diff --git a/vlib/eventbus/eventbus.v b/vlib/eventbus/eventbus.v index 9f055a949206a8..57a00b7825ff94 100644 --- a/vlib/eventbus/eventbus.v +++ b/vlib/eventbus/eventbus.v @@ -69,7 +69,7 @@ const dedup_buffer_len = 20 // publish publish an event with provided Params & name. 
fn (mut pb Publisher[T]) publish(name T, sender voidptr, args voidptr) { // println('Publisher.publish(name=${name} sender=${sender} args=${args})') - mut handled_receivers := unsafe { [eventbus.dedup_buffer_len]voidptr{} } // handle duplicate bugs TODO fix properly + perf + mut handled_receivers := unsafe { [dedup_buffer_len]voidptr{} } // handle duplicate bugs TODO fix properly + perf // is_key_down := name == 'on_key_down' mut j := 0 for event in pb.registry.events { @@ -83,7 +83,7 @@ fn (mut pb Publisher[T]) publish(name T, sender voidptr, args voidptr) { event.handler(event.receiver, args, sender) // handled_receivers << event.receiver handled_receivers[j] = event.receiver - j = (j + 1) % eventbus.dedup_buffer_len + j = (j + 1) % dedup_buffer_len } } pb.registry.events = pb.registry.events.filter(!(it.name == name && it.once)) diff --git a/vlib/flag/flag.v b/vlib/flag/flag.v index 8f47a89b8cf7be..61194a8e9dbe72 100644 --- a/vlib/flag/flag.v +++ b/vlib/flag/flag.v @@ -144,7 +144,7 @@ pub fn new_flag_parser(args []string) &FlagParser { idx_dashdash: idx_dashdash all_after_dashdash: all_after_dashdash args: all_before_dashdash - max_free_args: flag.max_args_number + max_free_args: max_args_number } } @@ -502,8 +502,8 @@ pub fn (mut fs FlagParser) string(name string, abbr u8, sdefault string, usage s // at least `n` in length. If the user gives less free arguments to the program, // the parser will return an error. pub fn (mut fs FlagParser) limit_free_args_to_at_least(n int) ! { - if n > flag.max_args_number { - return error('flag.limit_free_args_to_at_least expect n to be smaller than ${flag.max_args_number}') + if n > max_args_number { + return error('flag.limit_free_args_to_at_least expect n to be smaller than ${max_args_number}') } if n <= 0 { return error('flag.limit_free_args_to_at_least expect n to be a positive number') @@ -515,8 +515,8 @@ pub fn (mut fs FlagParser) limit_free_args_to_at_least(n int) ! { // at exactly `n` in length. 
If the user gives more or less free arguments to the program, // the parser will return an error. pub fn (mut fs FlagParser) limit_free_args_to_exactly(n int) ! { - if n > flag.max_args_number { - return error('flag.limit_free_args_to_exactly expect n to be smaller than ${flag.max_args_number}') + if n > max_args_number { + return error('flag.limit_free_args_to_exactly expect n to be smaller than ${max_args_number}') } if n < 0 { return error('flag.limit_free_args_to_exactly expect n to be a non negative number') @@ -547,7 +547,7 @@ pub fn (mut fs FlagParser) arguments_description(description string) { // That screen is usually shown when the `--help` option is given to the program. pub fn (fs FlagParser) usage() string { positive_min_arg := (fs.min_free_args > 0) - positive_max_arg := (fs.max_free_args > 0 && fs.max_free_args != flag.max_args_number) + positive_max_arg := (fs.max_free_args > 0 && fs.max_free_args != max_args_number) no_arguments := (fs.min_free_args == 0 && fs.max_free_args == 0) mut adesc := if fs.args_description.len > 0 { fs.args_description } else { '[ARGS]' } if no_arguments { @@ -556,7 +556,7 @@ pub fn (fs FlagParser) usage() string { mut use := []string{} if fs.application_version != '' { use << '${fs.application_name} ${fs.application_version}' - use << '${flag.underline}' + use << '${underline}' } if fs.usage_examples.len == 0 { use << 'Usage: ${fs.application_name} [options] ${adesc}' @@ -611,10 +611,10 @@ pub fn (fs FlagParser) usage() string { } option_names := ' ' + onames.join(', ') mut xspace := '' - if option_names.len > flag.space.len - 2 { - xspace = '\n${flag.space}' + if option_names.len > space.len - 2 { + xspace = '\n${space}' } else { - xspace = flag.space[option_names.len..] + xspace = space[option_names.len..] 
} fdesc := '${option_names}${xspace}${f.usage}' use << fdesc diff --git a/vlib/gg/draw.c.v b/vlib/gg/draw.c.v index 989cdcdd018482..20e2d60b3570a8 100644 --- a/vlib/gg/draw.c.v +++ b/vlib/gg/draw.c.v @@ -533,8 +533,8 @@ const small_circle_segments = [0, 2, 4, 6, 6, 8, 8, 13, 10, 18, 12, 12, 10, 13, fn radius_to_segments(r f32) int { if r < 30 { ir := int(math.ceil(r)) - if ir > 0 && ir < gg.small_circle_segments.len { - return gg.small_circle_segments[ir] + if ir > 0 && ir < small_circle_segments.len { + return small_circle_segments[ir] } return ir } diff --git a/vlib/gg/gg.c.v b/vlib/gg/gg.c.v index eb08ffb7cdb30c..8a1ff1505da5b1 100644 --- a/vlib/gg/gg.c.v +++ b/vlib/gg/gg.c.v @@ -611,7 +611,7 @@ pub fn (ctx &Context) end(options EndOptions) { create_default_pass(ctx.clear_pass) } .passthru { - create_default_pass(gg.dontcare_pass) + create_default_pass(dontcare_pass) } } gfx.begin_pass(pass) diff --git a/vlib/gg/gg.js.v b/vlib/gg/gg.js.v index 391388d892701e..ad86ab5745081a 100644 --- a/vlib/gg/gg.js.v +++ b/vlib/gg/gg.js.v @@ -258,7 +258,7 @@ pub: const size = Size{0, 0} pub fn window_size() Size { - return gg.size + return size } pub struct Context { @@ -324,7 +324,7 @@ pub fn new_context(cfg Config) &Context { g.width = cfg.width g.height = cfg.height g.ui_mode = cfg.ui_mode - mut sz := gg.size + mut sz := size sz.height = g.height sz.width = g.width g.config = cfg diff --git a/vlib/gg/m4/matrix.v b/vlib/gg/m4/matrix.v index 1328ef7e60f8eb..f5820705ac1fb3 100644 --- a/vlib/gg/m4/matrix.v +++ b/vlib/gg/m4/matrix.v @@ -46,7 +46,7 @@ pub fn (a Mat4) clean() Mat4 { unsafe { x := Mat4{} for c, value in a.e { - if f32_abs(value) < m4.precision { + if f32_abs(value) < precision { x.e[c] = 0 } else { x.e[c] = value @@ -70,7 +70,7 @@ pub fn (x Mat4) sum_all() f32 { pub fn (x Mat4) is_equal(y Mat4) bool { unsafe { for c, value in x.e { - if f32_abs(value - y.e[c]) > m4.precision { + if f32_abs(value - y.e[c]) > precision { return false } } diff --git 
a/vlib/gg/recorder.c.v b/vlib/gg/recorder.c.v index 5332c28f4081b3..b73be62f0fddae 100644 --- a/vlib/gg/recorder.c.v +++ b/vlib/gg/recorder.c.v @@ -7,15 +7,15 @@ import os // record_frame acts according to settings specified in `gg.recorder_settings`. @[if gg_record ?] pub fn (mut ctx Context) record_frame() { - if ctx.frame in gg.recorder_settings.screenshot_frames { - screenshot_file_path := '${gg.recorder_settings.screenshot_prefix}${ctx.frame}.png' + if ctx.frame in recorder_settings.screenshot_frames { + screenshot_file_path := '${recorder_settings.screenshot_prefix}${ctx.frame}.png' $if gg_record_trace ? { eprintln('>>> screenshoting ${screenshot_file_path}') } sapp.screenshot_png(screenshot_file_path) or { panic(err) } } - if ctx.frame == gg.recorder_settings.stop_at_frame { + if ctx.frame == recorder_settings.stop_at_frame { $if gg_record_trace ? { eprintln('>>> exiting at frame ${ctx.frame}') } diff --git a/vlib/gx/color.v b/vlib/gx/color.v index 54b76687702a9f..04fc39fea2ae6c 100644 --- a/vlib/gx/color.v +++ b/vlib/gx/color.v @@ -309,7 +309,7 @@ pub fn color_from_string(s string) Color { mut hex_str := '0x' + s[1..] return hex(hex_str.int()) } else { - return gx.string_colors[s] + return string_colors[s] } } diff --git a/vlib/hash/crc32/crc32.v b/vlib/hash/crc32/crc32.v index 2efb403f11aa73..828288f09ad44b 100644 --- a/vlib/hash/crc32/crc32.v +++ b/vlib/hash/crc32/crc32.v @@ -58,6 +58,6 @@ pub fn new(poly int) &Crc32 { // sum calculates the CRC-32 checksum of `b` by using the IEEE polynomial. 
pub fn sum(b []u8) u32 { - c := new(int(crc32.ieee)) + c := new(int(ieee)) return c.sum32(b) } diff --git a/vlib/hash/fnv1a/fnv1a.v b/vlib/hash/fnv1a/fnv1a.v index c7a8aa6294575a..459144ff8c413a 100644 --- a/vlib/hash/fnv1a/fnv1a.v +++ b/vlib/hash/fnv1a/fnv1a.v @@ -11,9 +11,9 @@ const fnv32_prime = u32(16777619) // sum32 returns a fnv1a hash of the string, described by `data` @[direct_array_access; inline] pub fn sum32_string(data string) u32 { - mut hash := fnv1a.fnv32_offset_basis + mut hash := fnv32_offset_basis for i in 0 .. data.len { - hash = (hash ^ u32(data[i])) * fnv1a.fnv32_prime + hash = (hash ^ u32(data[i])) * fnv32_prime } return hash } @@ -22,9 +22,9 @@ pub fn sum32_string(data string) u32 { // byte array `data`. @[direct_array_access; inline] pub fn sum32(data []u8) u32 { - mut hash := fnv1a.fnv32_offset_basis + mut hash := fnv32_offset_basis for i in 0 .. data.len { - hash = (hash ^ u32(data[i])) * fnv1a.fnv32_prime + hash = (hash ^ u32(data[i])) * fnv32_prime } return hash } @@ -34,9 +34,9 @@ pub fn sum32(data []u8) u32 { pub fn sum32_struct[T](s &T) u32 { bp := unsafe { &u8(s) } sz := int(sizeof(T)) - mut hash := fnv1a.fnv32_offset_basis + mut hash := fnv32_offset_basis for i in 0 .. sz { - hash = unsafe { (hash ^ u32(bp[i])) * fnv1a.fnv32_prime } + hash = unsafe { (hash ^ u32(bp[i])) * fnv32_prime } } return hash } @@ -45,9 +45,9 @@ pub fn sum32_struct[T](s &T) u32 { // the address in the given &byte pointer `data`. @[direct_array_access; inline; unsafe] pub fn sum32_bytes(data &u8, data_len int) u32 { - mut hash := fnv1a.fnv32_offset_basis + mut hash := fnv32_offset_basis for i in 0 .. 
data_len { - hash = unsafe { (hash ^ u32(data[i])) * fnv1a.fnv32_prime } + hash = unsafe { (hash ^ u32(data[i])) * fnv32_prime } } return hash } @@ -55,9 +55,9 @@ pub fn sum32_bytes(data &u8, data_len int) u32 { // sum64 returns a fnv1a hash of the string, described by `data` @[direct_array_access; inline] pub fn sum64_string(data string) u64 { - mut hash := fnv1a.fnv64_offset_basis + mut hash := fnv64_offset_basis for i in 0 .. data.len { - hash = (hash ^ u64(data[i])) * fnv1a.fnv64_prime + hash = (hash ^ u64(data[i])) * fnv64_prime } return hash } @@ -66,9 +66,9 @@ pub fn sum64_string(data string) u64 { // byte array `data`. @[direct_array_access; inline] pub fn sum64(data []u8) u64 { - mut hash := fnv1a.fnv64_offset_basis + mut hash := fnv64_offset_basis for i in 0 .. data.len { - hash = (hash ^ u64(data[i])) * fnv1a.fnv64_prime + hash = (hash ^ u64(data[i])) * fnv64_prime } return hash } @@ -77,9 +77,9 @@ pub fn sum64(data []u8) u64 { // the address in the given &byte pointer `data`. @[direct_array_access; inline; unsafe] pub fn sum64_bytes(data &u8, data_len int) u64 { - mut hash := fnv1a.fnv64_offset_basis + mut hash := fnv64_offset_basis for i in 0 .. data_len { - hash = unsafe { (hash ^ u64(data[i])) * fnv1a.fnv64_prime } + hash = unsafe { (hash ^ u64(data[i])) * fnv64_prime } } return hash } @@ -89,9 +89,9 @@ pub fn sum64_bytes(data &u8, data_len int) u64 { pub fn sum64_struct[T](s &T) u64 { bp := unsafe { &u8(s) } sz := int(sizeof(T)) - mut hash := fnv1a.fnv64_offset_basis + mut hash := fnv64_offset_basis for i in 0 .. sz { - hash = unsafe { (hash ^ u64(bp[i])) * fnv1a.fnv64_prime } + hash = unsafe { (hash ^ u64(bp[i])) * fnv64_prime } } return hash } diff --git a/vlib/io/io.v b/vlib/io/io.v index f19c47dcd8a45c..8982b0386705bc 100644 --- a/vlib/io/io.v +++ b/vlib/io/io.v @@ -7,7 +7,7 @@ const buf_max_len = 1024 // until either EOF is reached on `src` or an error occurs. // An error is returned if an error is encountered during write. 
pub fn cp(mut src Reader, mut dst Writer) ! { - mut buf := []u8{len: io.buf_max_len} + mut buf := []u8{len: buf_max_len} for { len := src.read(mut buf) or { break } dst.write(buf[..len]) or { return err } diff --git a/vlib/io/reader.v b/vlib/io/reader.v index 3bf1e7b835c10b..f1937bf2807cc6 100644 --- a/vlib/io/reader.v +++ b/vlib/io/reader.v @@ -46,7 +46,7 @@ pub fn read_all(config ReadAllConfig) ![]u8 { mut r := config.reader read_till_eof := config.read_to_end_of_stream - mut b := []u8{len: io.read_all_len} + mut b := []u8{len: read_all_len} mut read := 0 for { new_read := r.read(mut b[read..]) or { break } @@ -55,7 +55,7 @@ pub fn read_all(config ReadAllConfig) ![]u8 { break } if b.len == read { - unsafe { b.grow_len(io.read_all_grow_len) } + unsafe { b.grow_len(read_all_grow_len) } } } return b[..read] @@ -64,7 +64,7 @@ pub fn read_all(config ReadAllConfig) ![]u8 { // read_any reads any available bytes from a reader // (until the reader returns a read of 0 length). pub fn read_any(mut r Reader) ![]u8 { - mut b := []u8{len: io.read_all_len} + mut b := []u8{len: read_all_len} mut read := 0 for { new_read := r.read(mut b[read..]) or { return error('none') } @@ -73,7 +73,7 @@ pub fn read_any(mut r Reader) ![]u8 { break } if b.len == read { - unsafe { b.grow_len(io.read_all_grow_len) } + unsafe { b.grow_len(read_all_grow_len) } } } return b[..read] diff --git a/vlib/io/reader_test.v b/vlib/io/reader_test.v index 74bcc0b58f2590..e754f8c060ceec 100644 --- a/vlib/io/reader_test.v +++ b/vlib/io/reader_test.v @@ -56,7 +56,7 @@ fn (mut s StringReaderTest) read(mut buf []u8) !int { const newline_count = 100000 fn test_stringreadertest() { - text := '12345\n'.repeat(io.newline_count) + text := '12345\n'.repeat(newline_count) mut s := StringReaderTest{ text: text } @@ -64,7 +64,7 @@ fn test_stringreadertest() { for i := 0; true; i++ { if _ := r.read_line() { } else { - assert i == io.newline_count + assert i == newline_count break } } @@ -81,7 +81,7 @@ fn 
test_stringreadertest() { } fn test_stringreadertest2() { - text := '12345\r\n'.repeat(io.newline_count) + text := '12345\r\n'.repeat(newline_count) mut s := StringReaderTest{ text: text } @@ -89,7 +89,7 @@ fn test_stringreadertest2() { for i := 0; true; i++ { if _ := r.read_line() { } else { - assert i == io.newline_count + assert i == newline_count break } } diff --git a/vlib/io/util/util.v b/vlib/io/util/util.v index 0d899ed1d6260f..a22332892cb9c6 100644 --- a/vlib/io/util/util.v +++ b/vlib/io/util/util.v @@ -24,7 +24,7 @@ pub fn temp_file(tfo TempFileOptions) !(os.File, string) { } d = d.trim_right(os.path_separator) prefix, suffix := prefix_and_suffix(tfo.pattern) or { return error(@FN + ' ${err.msg()}') } - for retry := 0; retry < util.retries; retry++ { + for retry := 0; retry < retries; retry++ { path := os.join_path(d, prefix + random_number() + suffix) mut mode := 'rw+' $if windows { @@ -36,7 +36,7 @@ pub fn temp_file(tfo TempFileOptions) !(os.File, string) { } } return error(@FN + - ' could not create temporary file in "${d}". Retry limit (${util.retries}) exhausted. Please ensure write permissions.') + ' could not create temporary file in "${d}". Retry limit (${retries}) exhausted. Please ensure write permissions.') } @[params] @@ -59,7 +59,7 @@ pub fn temp_dir(tdo TempFileOptions) !string { os.ensure_folder_is_writable(d) or { return error_for_temporary_folder(@FN, d) } d = d.trim_right(os.path_separator) prefix, suffix := prefix_and_suffix(tdo.pattern) or { return error(@FN + ' ${err.msg()}') } - for retry := 0; retry < util.retries; retry++ { + for retry := 0; retry < retries; retry++ { path := os.join_path(d, prefix + random_number() + suffix) os.mkdir_all(path) or { continue } if os.is_dir(path) && os.exists(path) { @@ -67,7 +67,7 @@ pub fn temp_dir(tdo TempFileOptions) !string { return path } } - return error('${@FN} could not create temporary directory "${d}". 
Retry limit (${util.retries}) exhausted.') + return error('${@FN} could not create temporary directory "${d}". Retry limit (${retries}) exhausted.') } // * Utility functions diff --git a/vlib/math/big/array_ops.v b/vlib/math/big/array_ops.v index 7a023197cf494b..82c79f29d6ec42 100644 --- a/vlib/math/big/array_ops.v +++ b/vlib/math/big/array_ops.v @@ -119,9 +119,9 @@ fn multiply_digit_array(operand_a []u32, operand_b []u32, mut storage []u32) { } else { operand_b.len } - if max_len >= big.toom3_multiplication_limit { + if max_len >= toom3_multiplication_limit { toom3_multiply_digit_array(operand_a, operand_b, mut storage) - } else if max_len >= big.karatsuba_multiplication_limit { + } else if max_len >= karatsuba_multiplication_limit { karatsuba_multiply_digit_array(operand_a, operand_b, mut storage) } else { simple_multiply_digit_array(operand_a, operand_b, mut storage) @@ -249,7 +249,7 @@ const newton_division_limit = 10_000 @[inline] fn divide_array_by_array(operand_a []u32, operand_b []u32, mut quotient []u32, mut remainder []u32) { - if operand_a.len >= big.newton_division_limit { + if operand_a.len >= newton_division_limit { newton_divide_array_by_array(operand_a, operand_b, mut quotient, mut remainder) } else { binary_divide_array_by_array(operand_a, operand_b, mut quotient, mut remainder) diff --git a/vlib/math/big/integer.v b/vlib/math/big/integer.v index 3d916af13bf51b..d288d863d04ca8 100644 --- a/vlib/math/big/integer.v +++ b/vlib/math/big/integer.v @@ -184,7 +184,7 @@ fn validate_string(characters string, radix u32) ! 
{ for index := start_index; index < characters.len; index++ { digit := characters[index] - value := big.digit_array.index(digit) + value := digit_array.index(digit) if value == -1 { return error('math.big: Invalid character ${digit}') @@ -212,7 +212,7 @@ fn integer_from_special_string(characters string, chunk_size int) Integer { mut offset := 0 for index := characters.len - 1; index >= start_index; index-- { digit := characters[index] - value := u32(big.digit_array.index(digit)) + value := u32(digit_array.index(digit)) current |= value << offset offset += chunk_size @@ -254,7 +254,7 @@ fn integer_from_regular_string(characters string, radix u32) Integer { for index := start_index; index < characters.len; index++ { digit := characters[index] - value := big.digit_array.index(digit) + value := digit_array.index(digit) result *= radix_int result += integer_from_int(value) @@ -876,7 +876,7 @@ fn (integer Integer) general_radix_str(radix u32) string { mut rune_array := []rune{cap: current.digits.len * 4} for current.signum > 0 { new_current, digit = current.div_mod_internal(divisor) - rune_array << big.digit_array[digit.int()] + rune_array << digit_array[digit.int()] unsafe { digit.free() } unsafe { current.free() } current = new_current diff --git a/vlib/math/bits.v b/vlib/math/bits.v index 6749b4d3c0718f..080430c0479bf4 100644 --- a/vlib/math/bits.v +++ b/vlib/math/bits.v @@ -17,19 +17,19 @@ const frac_mask = u64((u64(1) << u64(shift)) - u64(1)) // inf returns positive infinity if sign >= 0, negative infinity if sign < 0. pub fn inf(sign int) f64 { - v := if sign >= 0 { math.uvinf } else { math.uvneginf } + v := if sign >= 0 { uvinf } else { uvneginf } return f64_from_bits(v) } // nan returns an IEEE 754 ``not-a-number'' value. pub fn nan() f64 { - return f64_from_bits(math.uvnan) + return f64_from_bits(uvnan) } // is_nan reports whether f is an IEEE 754 ``not-a-number'' value. 
pub fn is_nan(f f64) bool { $if fast_math { - if f64_bits(f) == math.uvnan { + if f64_bits(f) == uvnan { return true } } @@ -62,7 +62,7 @@ pub fn is_finite(f f64) bool { pub fn normalize(x f64) (f64, int) { smallest_normal := 2.2250738585072014e-308 // 2**-1022 if abs(x) < smallest_normal { - return x * math.normalize_smallest_mask, -52 + return x * normalize_smallest_mask, -52 } return x, 0 } diff --git a/vlib/math/bits/bits.v b/vlib/math/bits/bits.v index 286a14a42468e5..4dd89e3ec1ae51 100644 --- a/vlib/math/bits/bits.v +++ b/vlib/math/bits/bits.v @@ -57,7 +57,7 @@ pub fn trailing_zeros_16(x u16) int { return 16 } // see comment in trailing_zeros_64 - return int(bits.de_bruijn32tab[u32(x & -x) * bits.de_bruijn32 >> (32 - 5)]) + return int(de_bruijn32tab[u32(x & -x) * de_bruijn32 >> (32 - 5)]) } // trailing_zeros_32 returns the number of trailing zero bits in x; the result is 32 for x == 0. @@ -67,7 +67,7 @@ pub fn trailing_zeros_32(x u32) int { return 32 } // see comment in trailing_zeros_64 - return int(bits.de_bruijn32tab[(x & -x) * bits.de_bruijn32 >> (32 - 5)]) + return int(de_bruijn32tab[(x & -x) * de_bruijn32 >> (32 - 5)]) } // trailing_zeros_64 returns the number of trailing zero bits in x; the result is 64 for x == 0. @@ -87,7 +87,7 @@ pub fn trailing_zeros_64(x u64) int { // find by how many bits it was shifted by looking at which six bit // substring ended up at the top of the word. // (Knuth, volume 4, section 7.3.1) - return int(bits.de_bruijn64tab[(x & -x) * bits.de_bruijn64 >> (64 - 6)]) + return int(de_bruijn64tab[(x & -x) * de_bruijn64 >> (64 - 6)]) } // --- OnesCount --- @@ -131,9 +131,9 @@ pub fn ones_count_64(x u64) int { // Per "Hacker's Delight", the first line can be simplified // more, but it saves at best one instruction, so we leave // it alone for clarity. 
- mut y := (x >> u64(1) & (bits.m0 & max_u64)) + (x & (bits.m0 & max_u64)) - y = (y >> u64(2) & (bits.m1 & max_u64)) + (y & (bits.m1 & max_u64)) - y = ((y >> 4) + y) & (bits.m2 & max_u64) + mut y := (x >> u64(1) & (m0 & max_u64)) + (x & (m0 & max_u64)) + y = (y >> u64(2) & (m1 & max_u64)) + (y & (m1 & max_u64)) + y = ((y >> 4) + y) & (m2 & max_u64) y += y >> 8 y += y >> 16 y += y >> 32 @@ -152,8 +152,8 @@ const n64 = u64(64) // This function's execution time does not depend on the inputs. @[inline] pub fn rotate_left_8(x u8, k int) u8 { - s := u8(k) & (bits.n8 - u8(1)) - return (x << s) | (x >> (bits.n8 - s)) + s := u8(k) & (n8 - u8(1)) + return (x << s) | (x >> (n8 - s)) } // rotate_left_16 returns the value of x rotated left by (k mod 16) bits. @@ -162,8 +162,8 @@ pub fn rotate_left_8(x u8, k int) u8 { // This function's execution time does not depend on the inputs. @[inline] pub fn rotate_left_16(x u16, k int) u16 { - s := u16(k) & (bits.n16 - u16(1)) - return (x << s) | (x >> (bits.n16 - s)) + s := u16(k) & (n16 - u16(1)) + return (x << s) | (x >> (n16 - s)) } // rotate_left_32 returns the value of x rotated left by (k mod 32) bits. @@ -172,8 +172,8 @@ pub fn rotate_left_16(x u16, k int) u16 { // This function's execution time does not depend on the inputs. @[inline] pub fn rotate_left_32(x u32, k int) u32 { - s := u32(k) & (bits.n32 - u32(1)) - return (x << s) | (x >> (bits.n32 - s)) + s := u32(k) & (n32 - u32(1)) + return (x << s) | (x >> (n32 - s)) } // rotate_left_64 returns the value of x rotated left by (k mod 64) bits. @@ -182,8 +182,8 @@ pub fn rotate_left_32(x u32, k int) u32 { // This function's execution time does not depend on the inputs. 
@[inline] pub fn rotate_left_64(x u64, k int) u64 { - s := u64(k) & (bits.n64 - u64(1)) - return (x << s) | (x >> (bits.n64 - s)) + s := u64(k) & (n64 - u64(1)) + return (x << s) | (x >> (n64 - s)) } // --- Reverse --- @@ -202,18 +202,18 @@ pub fn reverse_16(x u16) u16 { // reverse_32 returns the value of x with its bits in reversed order. @[inline] pub fn reverse_32(x u32) u32 { - mut y := ((x >> u32(1) & (bits.m0 & max_u32)) | ((x & (bits.m0 & max_u32)) << 1)) - y = ((y >> u32(2) & (bits.m1 & max_u32)) | ((y & (bits.m1 & max_u32)) << u32(2))) - y = ((y >> u32(4) & (bits.m2 & max_u32)) | ((y & (bits.m2 & max_u32)) << u32(4))) + mut y := ((x >> u32(1) & (m0 & max_u32)) | ((x & (m0 & max_u32)) << 1)) + y = ((y >> u32(2) & (m1 & max_u32)) | ((y & (m1 & max_u32)) << u32(2))) + y = ((y >> u32(4) & (m2 & max_u32)) | ((y & (m2 & max_u32)) << u32(4))) return reverse_bytes_32(u32(y)) } // reverse_64 returns the value of x with its bits in reversed order. @[inline] pub fn reverse_64(x u64) u64 { - mut y := ((x >> u64(1) & (bits.m0 & max_u64)) | ((x & (bits.m0 & max_u64)) << 1)) - y = ((y >> u64(2) & (bits.m1 & max_u64)) | ((y & (bits.m1 & max_u64)) << 2)) - y = ((y >> u64(4) & (bits.m2 & max_u64)) | ((y & (bits.m2 & max_u64)) << 4)) + mut y := ((x >> u64(1) & (m0 & max_u64)) | ((x & (m0 & max_u64)) << 1)) + y = ((y >> u64(2) & (m1 & max_u64)) | ((y & (m1 & max_u64)) << 2)) + y = ((y >> u64(4) & (m2 & max_u64)) | ((y & (m2 & max_u64)) << 4)) return reverse_bytes_64(y) } @@ -231,7 +231,7 @@ pub fn reverse_bytes_16(x u16) u16 { // This function's execution time does not depend on the inputs. @[inline] pub fn reverse_bytes_32(x u32) u32 { - y := ((x >> u32(8) & (bits.m3 & max_u32)) | ((x & (bits.m3 & max_u32)) << u32(8))) + y := ((x >> u32(8) & (m3 & max_u32)) | ((x & (m3 & max_u32)) << u32(8))) return u32((y >> 16) | (y << 16)) } @@ -240,8 +240,8 @@ pub fn reverse_bytes_32(x u32) u32 { // This function's execution time does not depend on the inputs. 
@[inline] pub fn reverse_bytes_64(x u64) u64 { - mut y := ((x >> u64(8) & (bits.m3 & max_u64)) | ((x & (bits.m3 & max_u64)) << u64(8))) - y = ((y >> u64(16) & (bits.m4 & max_u64)) | ((y & (bits.m4 & max_u64)) << u64(16))) + mut y := ((x >> u64(8) & (m3 & max_u64)) | ((x & (m3 & max_u64)) << u64(8))) + y = ((y >> u64(16) & (m4 & max_u64)) | ((y & (m4 & max_u64)) << u64(16))) return (y >> 32) | (y << 32) } @@ -387,13 +387,13 @@ pub fn mul_32(x u32, y u32) (u32, u32) { // // This function's execution time does not depend on the inputs. pub fn mul_64(x u64, y u64) (u64, u64) { - x0 := x & bits.mask32 + x0 := x & mask32 x1 := x >> 32 - y0 := y & bits.mask32 + y0 := y & mask32 y1 := y >> 32 w0 := x0 * y0 t := x1 * y0 + (w0 >> 32) - mut w1 := t & bits.mask32 + mut w1 := t & mask32 w2 := t >> 32 w1 += x0 * y1 hi := x1 * y1 + w2 + (w1 >> 32) @@ -408,7 +408,7 @@ pub fn mul_64(x u64, y u64) (u64, u64) { // div_32 panics for y == 0 (division by zero) or y <= hi (quotient overflow). pub fn div_32(hi u32, lo u32, y u32) (u32, u32) { if y != 0 && y <= hi { - panic(bits.overflow_error) + panic(overflow_error) } z := (u64(hi) << 32) | u64(lo) quo := u32(z / u64(y)) @@ -423,15 +423,15 @@ pub fn div_32(hi u32, lo u32, y u32) (u32, u32) { pub fn div_64(hi u64, lo u64, y1 u64) (u64, u64) { mut y := y1 if y == 0 { - panic(bits.overflow_error) + panic(overflow_error) } if y <= hi { - panic(bits.overflow_error) + panic(overflow_error) } s := u32(leading_zeros_64(y)) y <<= s yn1 := y >> 32 - yn0 := y & bits.mask32 + yn0 := y & mask32 ss1 := (hi << s) xxx := 64 - s mut ss2 := lo >> xxx @@ -454,28 +454,28 @@ pub fn div_64(hi u64, lo u64, y1 u64) (u64, u64) { un32 := ss1 | ss2 un10 := lo << s un1 := un10 >> 32 - un0 := un10 & bits.mask32 + un0 := un10 & mask32 mut q1 := un32 / yn1 mut rhat := un32 - (q1 * yn1) - for q1 >= bits.two32 || (q1 * yn0) > ((bits.two32 * rhat) + un1) { + for q1 >= two32 || (q1 * yn0) > ((two32 * rhat) + un1) { q1-- rhat += yn1 - if rhat >= bits.two32 { + if rhat >= 
two32 { break } } - un21 := (un32 * bits.two32) + (un1 - (q1 * y)) + un21 := (un32 * two32) + (un1 - (q1 * y)) mut q0 := un21 / yn1 rhat = un21 - q0 * yn1 - for q0 >= bits.two32 || (q0 * yn0) > ((bits.two32 * rhat) + un0) { + for q0 >= two32 || (q0 * yn0) > ((two32 * rhat) + un0) { q0-- rhat += yn1 - if rhat >= bits.two32 { + if rhat >= two32 { break } } - qq := ((q1 * bits.two32) + q0) - rr := ((un21 * bits.two32) + un0 - (q0 * y)) >> s + qq := ((q1 * two32) + q0) + rr := ((un21 * two32) + un0 - (q0 * y)) >> s return qq, rr } diff --git a/vlib/math/erf.v b/vlib/math/erf.v index 3864bdf1eb8a79..db08a084aadfe7 100644 --- a/vlib/math/erf.v +++ b/vlib/math/erf.v @@ -236,15 +236,14 @@ pub fn erf(a f64) f64 { mut temp := 0.0 if x < small_ { // |x| < 2**-28 if x < very_tiny { - temp = 0.125 * (8.0 * x + math.efx8 * x) // avoid underflow + temp = 0.125 * (8.0 * x + efx8 * x) // avoid underflow } else { - temp = x + math.efx * x + temp = x + efx * x } } else { z := x * x - r := math.pp0 + z * (math.pp1 + z * (math.pp2 + z * (math.pp3 + z * math.pp4))) - s_ := 1.0 + z * (math.qq1 + z * (math.qq2 + z * (math.qq3 + z * (math.qq4 + - z * math.qq5)))) + r := pp0 + z * (pp1 + z * (pp2 + z * (pp3 + z * pp4))) + s_ := 1.0 + z * (qq1 + z * (qq2 + z * (qq3 + z * (qq4 + z * qq5)))) y := r / s_ temp = x + x * y } @@ -255,14 +254,12 @@ pub fn erf(a f64) f64 { } if x < 1.25 { // 0.84375 <= |x| < 1.25 s_ := x - 1 - p := math.pa0 + s_ * (math.pa1 + s_ * (math.pa2 + s_ * (math.pa3 + s_ * (math.pa4 + - s_ * (math.pa5 + s_ * math.pa6))))) - q := 1.0 + s_ * (math.qa1 + s_ * (math.qa2 + s_ * (math.qa3 + s_ * (math.qa4 + - s_ * (math.qa5 + s_ * math.qa6))))) + p := pa0 + s_ * (pa1 + s_ * (pa2 + s_ * (pa3 + s_ * (pa4 + s_ * (pa5 + s_ * pa6))))) + q := 1.0 + s_ * (qa1 + s_ * (qa2 + s_ * (qa3 + s_ * (qa4 + s_ * (qa5 + s_ * qa6))))) if sign { - return -math.erx - p / q + return -erx - p / q } - return math.erx + p / q + return erx + p / q } if x >= 6 { // inf > |x| >= 6 if sign { @@ -274,15 +271,14 
@@ pub fn erf(a f64) f64 { mut r := 0.0 mut s := 0.0 if x < 1.0 / 0.35 { // |x| < 1 / 0.35 ~ 2.857143 - r = math.ra0 + s_ * (math.ra1 + s_ * (math.ra2 + s_ * (math.ra3 + s_ * (math.ra4 + - s_ * (math.ra5 + s_ * (math.ra6 + s_ * math.ra7)))))) - s = 1.0 + s_ * (math.sa1 + s_ * (math.sa2 + s_ * (math.sa3 + s_ * (math.sa4 + - s_ * (math.sa5 + s_ * (math.sa6 + s_ * (math.sa7 + s_ * math.sa8))))))) + r = ra0 + s_ * (ra1 + s_ * (ra2 + s_ * (ra3 + s_ * (ra4 + s_ * (ra5 + s_ * (ra6 + + s_ * ra7)))))) + s = 1.0 + s_ * (sa1 + s_ * (sa2 + s_ * (sa3 + s_ * (sa4 + s_ * (sa5 + s_ * (sa6 + + s_ * (sa7 + s_ * sa8))))))) } else { // |x| >= 1 / 0.35 ~ 2.857143 - r = math.rb0 + s_ * (math.rb1 + s_ * (math.rb2 + s_ * (math.rb3 + s_ * (math.rb4 + - s_ * (math.rb5 + s_ * math.rb6))))) - s = 1.0 + s_ * (math.sb1 + s_ * (math.sb2 + s_ * (math.sb3 + s_ * (math.sb4 + - s_ * (math.sb5 + s_ * (math.sb6 + s_ * math.sb7)))))) + r = rb0 + s_ * (rb1 + s_ * (rb2 + s_ * (rb3 + s_ * (rb4 + s_ * (rb5 + s_ * rb6))))) + s = 1.0 + s_ * (sb1 + s_ * (sb2 + s_ * (sb3 + s_ * (sb4 + s_ * (sb5 + s_ * (sb6 + + s_ * sb7)))))) } z := f64_from_bits(f64_bits(x) & 0xffffffff00000000) // pseudo-single (20-bit) precision x r_ := exp(-z * z - 0.5625) * exp((z - x) * (z + x) + r / s) @@ -322,9 +318,8 @@ pub fn erfc(a f64) f64 { temp = x } else { z := x * x - r := math.pp0 + z * (math.pp1 + z * (math.pp2 + z * (math.pp3 + z * math.pp4))) - s_ := 1.0 + z * (math.qq1 + z * (math.qq2 + z * (math.qq3 + z * (math.qq4 + - z * math.qq5)))) + r := pp0 + z * (pp1 + z * (pp2 + z * (pp3 + z * pp4))) + s_ := 1.0 + z * (qq1 + z * (qq2 + z * (qq3 + z * (qq4 + z * qq5)))) y := r / s_ if x < 0.25 { // |x| < 1.0/4 temp = x + x * y @@ -339,32 +334,29 @@ pub fn erfc(a f64) f64 { } if x < 1.25 { // 0.84375 <= |x| < 1.25 s_ := x - 1 - p := math.pa0 + s_ * (math.pa1 + s_ * (math.pa2 + s_ * (math.pa3 + s_ * (math.pa4 + - s_ * (math.pa5 + s_ * math.pa6))))) - q := 1.0 + s_ * (math.qa1 + s_ * (math.qa2 + s_ * (math.qa3 + s_ * (math.qa4 + - s_ * 
(math.qa5 + s_ * math.qa6))))) + p := pa0 + s_ * (pa1 + s_ * (pa2 + s_ * (pa3 + s_ * (pa4 + s_ * (pa5 + s_ * pa6))))) + q := 1.0 + s_ * (qa1 + s_ * (qa2 + s_ * (qa3 + s_ * (qa4 + s_ * (qa5 + s_ * qa6))))) if sign { - return 1.0 + math.erx + p / q + return 1.0 + erx + p / q } - return 1.0 - math.erx - p / q + return 1.0 - erx - p / q } if x < 28 { // |x| < 28 s_ := 1.0 / (x * x) mut r := 0.0 mut s := 0.0 if x < 1.0 / 0.35 { // |x| < 1 / 0.35 ~ 2.857143 - r = math.ra0 + s_ * (math.ra1 + s_ * (math.ra2 + s_ * (math.ra3 + s_ * (math.ra4 + - s_ * (math.ra5 + s_ * (math.ra6 + s_ * math.ra7)))))) - s = 1.0 + s_ * (math.sa1 + s_ * (math.sa2 + s_ * (math.sa3 + s_ * (math.sa4 + - s_ * (math.sa5 + s_ * (math.sa6 + s_ * (math.sa7 + s_ * math.sa8))))))) + r = ra0 + s_ * (ra1 + s_ * (ra2 + s_ * (ra3 + s_ * (ra4 + s_ * (ra5 + s_ * (ra6 + + s_ * ra7)))))) + s = 1.0 + s_ * (sa1 + s_ * (sa2 + s_ * (sa3 + s_ * (sa4 + s_ * (sa5 + s_ * (sa6 + + s_ * (sa7 + s_ * sa8))))))) } else { // |x| >= 1 / 0.35 ~ 2.857143 if sign && x > 6 { return 2.0 // x < -6 } - r = math.rb0 + s_ * (math.rb1 + s_ * (math.rb2 + s_ * (math.rb3 + s_ * (math.rb4 + - s_ * (math.rb5 + s_ * math.rb6))))) - s = 1.0 + s_ * (math.sb1 + s_ * (math.sb2 + s_ * (math.sb3 + s_ * (math.sb4 + - s_ * (math.sb5 + s_ * (math.sb6 + s_ * math.sb7)))))) + r = rb0 + s_ * (rb1 + s_ * (rb2 + s_ * (rb3 + s_ * (rb4 + s_ * (rb5 + s_ * rb6))))) + s = 1.0 + s_ * (sb1 + s_ * (sb2 + s_ * (sb3 + s_ * (sb4 + s_ * (sb5 + s_ * (sb6 + + s_ * sb7)))))) } z := f64_from_bits(f64_bits(x) & 0xffffffff00000000) // pseudo-single (20-bit) precision x r_ := exp(-z * z - 0.5625) * exp((z - x) * (z + x) + r / s) diff --git a/vlib/math/exp.v b/vlib/math/exp.v index c845820b05808c..f65d6f0708f9f5 100644 --- a/vlib/math/exp.v +++ b/vlib/math/exp.v @@ -65,8 +65,8 @@ pub fn exp(x f64) f64 { if x > 0 { k = int(log2e * x + 0.5) } - hi := x - f64(k) * math.ln2hi - lo := f64(k) * math.ln2lo + hi := x - f64(k) * ln2hi + lo := f64(k) * ln2lo // compute return 
expmulti(hi, lo, k) } @@ -99,8 +99,8 @@ pub fn exp2(x f64) f64 { k = int(x - 0.5) } mut t := x - f64(k) - hi := t * math.ln2hi - lo := -t * math.ln2lo + hi := t * ln2hi + lo := -t * ln2lo // compute return expmulti(hi, lo, k) } diff --git a/vlib/math/fractions/approximations.v b/vlib/math/fractions/approximations.v index ec3b16227f776e..1d08d3550298b8 100644 --- a/vlib/math/fractions/approximations.v +++ b/vlib/math/fractions/approximations.v @@ -71,13 +71,13 @@ fn eval_cf(whole i64, den []i64) Fraction { // within the default epsilon value (1.0e-4). This means the result will // be accurate to 3 places after the decimal. pub fn approximate(val f64) Fraction { - return approximate_with_eps(val, fractions.default_eps) + return approximate_with_eps(val, default_eps) } // approximate_with_eps returns a Fraction pub fn approximate_with_eps(val f64, eps f64) Fraction { if val == 0.0 { - return fractions.zero + return zero } if eps < 0.0 { panic('Epsilon value cannot be negative.') @@ -94,12 +94,12 @@ pub fn approximate_with_eps(val f64, eps f64) Fraction { return fraction(whole, 1) } mut d := []i64{} - mut partial := fractions.zero + mut partial := zero // We must complete the approximation within the maximum number of // itertations allowed. If we can't panic. // Empirically tested: the hardest constant to approximate is the // golden ratio (math.phi) and for f64s, it only needs 38 iterations. - for _ in 0 .. fractions.max_iterations { + for _ in 0 .. max_iterations { // We calculate the reciprocal. That's why the numerator is // always 1. 
frac = 1.0 / frac diff --git a/vlib/math/invtrig.v b/vlib/math/invtrig.v index 7a8b3b6f62d772..80d034297e7487 100644 --- a/vlib/math/invtrig.v +++ b/vlib/math/invtrig.v @@ -75,10 +75,10 @@ fn satan(x f64) f64 { if x <= 0.66 { return xatan(x) } - if x > math.tan3pio8 { - return pi / 2.0 - xatan(1.0 / x) + f64(math.morebits) + if x > tan3pio8 { + return pi / 2.0 - xatan(1.0 / x) + f64(morebits) } - return pi / 4 + xatan((x - 1.0) / (x + 1.0)) + 0.5 * f64(math.morebits) + return pi / 4 + xatan((x - 1.0) / (x + 1.0)) + 0.5 * f64(morebits) } // atan returns the arctangent, in radians, of x. @@ -208,7 +208,7 @@ pub fn acos(x f64) f64 { return f64(2.0) * asin(sqrt(0.5 - 0.5 * x)) } mut z := pi / f64(4.0) - asin(x) - z = z + math.morebits + z = z + morebits z = z + pi / f64(4.0) return z } diff --git a/vlib/math/math_test.v b/vlib/math/math_test.v index 82b9bc039118d0..60ba9ec11ae208 100644 --- a/vlib/math/math_test.v +++ b/vlib/math/math_test.v @@ -212,10 +212,10 @@ fn test_angle_diff() { } fn test_acos() { - for i := 0; i < math.vf_.len; i++ { - a := math.vf_[i] / 10 + for i := 0; i < vf_.len; i++ { + a := vf_[i] / 10 f := acos(a) - assert soclose(math.acos_[i], f, 1e-7) + assert soclose(acos_[i], f, 1e-7) } vfacos_sc_ := [-pi, 1, pi, nan()] acos_sc_ := [nan(), 0, nan(), nan()] @@ -226,10 +226,10 @@ fn test_acos() { } fn test_acosh() { - for i := 0; i < math.vf_.len; i++ { - a := 1.0 + abs(math.vf_[i]) + for i := 0; i < vf_.len; i++ { + a := 1.0 + abs(vf_[i]) f := acosh(a) - assert veryclose(math.acosh_[i], f) + assert veryclose(acosh_[i], f) } vfacosh_sc_ := [inf(-1), 0.5, 1, inf(1), nan()] acosh_sc_ := [nan(), nan(), 0, inf(1), nan()] @@ -240,10 +240,10 @@ fn test_acosh() { } fn test_asin() { - for i := 0; i < math.vf_.len; i++ { - a := math.vf_[i] / 10 + for i := 0; i < vf_.len; i++ { + a := vf_[i] / 10 f := asin(a) - assert veryclose(math.asin_[i], f) + assert veryclose(asin_[i], f) } vfasin_sc_ := [-pi, copysign(0, -1), 0, pi, nan()] asin_sc_ := [nan(), copysign(0, 
-1), 0, nan(), nan()] @@ -254,9 +254,9 @@ fn test_asin() { } fn test_asinh() { - for i := 0; i < math.vf_.len; i++ { - f := asinh(math.vf_[i]) - assert veryclose(math.asinh_[i], f) + for i := 0; i < vf_.len; i++ { + f := asinh(vf_[i]) + assert veryclose(asinh_[i], f) } vfasinh_sc_ := [inf(-1), copysign(0, -1), 0, inf(1), nan()] asinh_sc_ := [inf(-1), copysign(0, -1), 0, inf(1), nan()] @@ -267,9 +267,9 @@ fn test_asinh() { } fn test_atan() { - for i := 0; i < math.vf_.len; i++ { - f := atan(math.vf_[i]) - assert veryclose(math.atan_[i], f) + for i := 0; i < vf_.len; i++ { + f := atan(vf_[i]) + assert veryclose(atan_[i], f) } vfatan_sc_ := [inf(-1), copysign(0, -1), 0, inf(1), nan()] atan_sc_ := [f64(-pi / 2), copysign(0, -1), 0, pi / 2, nan()] @@ -280,10 +280,10 @@ fn test_atan() { } fn test_atanh() { - for i := 0; i < math.vf_.len; i++ { - a := math.vf_[i] / 10 + for i := 0; i < vf_.len; i++ { + a := vf_[i] / 10 f := atanh(a) - assert veryclose(math.atanh_[i], f) + assert veryclose(atanh_[i], f) } vfatanh_sc_ := [inf(-1), -pi, -1, copysign(0, -1), 0, 1, pi, inf(1), nan()] @@ -296,9 +296,9 @@ fn test_atanh() { } fn test_atan2() { - for i := 0; i < math.vf_.len; i++ { - f := atan2(10, math.vf_[i]) - assert veryclose(math.atan2_[i], f) + for i := 0; i < vf_.len; i++ { + f := atan2(10, vf_[i]) + assert veryclose(atan2_[i], f) } vfatan2_sc_ := [[inf(-1), inf(-1)], [inf(-1), -pi], [inf(-1), 0], [inf(-1), pi], [inf(-1), inf(1)], [inf(-1), nan()], [-pi, inf(-1)], @@ -369,9 +369,9 @@ fn test_ceil() { } fn test_cos() { - for i := 0; i < math.vf_.len; i++ { - f := cos(math.vf_[i]) - assert veryclose(math.cos_[i], f) + for i := 0; i < vf_.len; i++ { + f := cos(vf_[i]) + assert veryclose(cos_[i], f) } vfcos_sc_ := [inf(-1), inf(1), nan()] cos_sc_ := [nan(), nan(), nan()] @@ -382,9 +382,9 @@ fn test_cos() { } fn test_cosh() { - for i := 0; i < math.vf_.len; i++ { - f := cosh(math.vf_[i]) - assert close(math.cosh_[i], f) + for i := 0; i < vf_.len; i++ { + f := cosh(vf_[i]) + 
assert close(cosh_[i], f) } vfcosh_sc_ := [inf(-1), copysign(0, -1), 0, inf(1), nan()] cosh_sc_ := [inf(1), 1, 1, inf(1), nan()] @@ -395,15 +395,15 @@ fn test_cosh() { } fn test_expm1() { - for i := 0; i < math.vf_.len; i++ { - a := math.vf_[i] / 100 + for i := 0; i < vf_.len; i++ { + a := vf_[i] / 100 f := expm1(a) - assert veryclose(math.expm1_[i], f) + assert veryclose(expm1_[i], f) } - for i := 0; i < math.vf_.len; i++ { - a := math.vf_[i] * 10 + for i := 0; i < vf_.len; i++ { + a := vf_[i] * 10 f := expm1(a) - assert close(math.expm1_large_[i], f) + assert close(expm1_large_[i], f) } // vfexpm1_sc_ := [f64(-710), copysign(0, -1), 0, 710, inf(1), nan()] // expm1_sc_ := [f64(-1), copysign(0, -1), 0, inf(1), inf(1), nan()] @@ -414,9 +414,9 @@ fn test_expm1() { } fn test_abs() { - for i := 0; i < math.vf_.len; i++ { - f := abs(math.vf_[i]) - assert math.fabs_[i] == f + for i := 0; i < vf_.len; i++ { + f := abs(vf_[i]) + assert fabs_[i] == f } } @@ -431,9 +431,9 @@ fn test_abs_zero() { } fn test_floor() { - for i := 0; i < math.vf_.len; i++ { - f := floor(math.vf_[i]) - assert alike(math.floor_[i], f) + for i := 0; i < vf_.len; i++ { + f := floor(vf_[i]) + assert alike(floor_[i], f) } vfceil_sc_ := [inf(-1), copysign(0, -1), 0, inf(1), nan()] ceil_sc_ := [inf(-1), copysign(0, -1), 0, inf(1), nan()] @@ -444,16 +444,16 @@ fn test_floor() { } fn test_max() { - for i := 0; i < math.vf_.len; i++ { - f := max(math.vf_[i], math.ceil_[i]) - assert math.ceil_[i] == f + for i := 0; i < vf_.len; i++ { + f := max(vf_[i], ceil_[i]) + assert ceil_[i] == f } } fn test_min() { - for i := 0; i < math.vf_.len; i++ { - f := min(math.vf_[i], math.floor_[i]) - assert math.floor_[i] == f + for i := 0; i < vf_.len; i++ { + f := min(vf_[i], floor_[i]) + assert floor_[i] == f } } @@ -512,9 +512,9 @@ fn test_sign() { } fn test_mod() { - for i := 0; i < math.vf_.len; i++ { - f := mod(10, math.vf_[i]) - assert math.fmod_[i] == f + for i := 0; i < vf_.len; i++ { + f := mod(10, vf_[i]) + assert 
fmod_[i] == f } // verify precision of result for extreme inputs f := mod(5.9790119248836734e+200, 1.1258465975523544) @@ -529,9 +529,9 @@ fn test_cbrt() { } fn test_exp() { - for i := 0; i < math.vf_.len; i++ { - f := exp(math.vf_[i]) - assert close(math.exp_[i], f), 'math.exp_[i]: ${math.exp_[i]:10}, ${f64_bits(math.exp_[i]):12} | f: ${f}, ${f64_bits(f):12}' + for i := 0; i < vf_.len; i++ { + f := exp(vf_[i]) + assert close(exp_[i], f), 'math.exp_[i]: ${exp_[i]:10}, ${f64_bits(exp_[i]):12} | f: ${f}, ${f64_bits(f):12}' } vfexp_sc_ := [inf(-1), -2000, 2000, inf(1), nan(), // smallest f64 that overflows Exp(x) 7.097827128933841e+02, 1.48852223e+09, 1.4885222e+09, 1, // near zero @@ -546,9 +546,9 @@ fn test_exp() { } fn test_exp2() { - for i := 0; i < math.vf_.len; i++ { - f := exp2(math.vf_[i]) - assert soclose(math.exp2_[i], f, 1e-9) + for i := 0; i < vf_.len; i++ { + f := exp2(vf_[i]) + assert soclose(exp2_[i], f, 1e-9) } vfexp2_sc_ := [f64(-2000), 2000, inf(1), nan(), // smallest f64 that overflows Exp2(x) 1024, -1.07399999999999e+03, // near underflow @@ -567,9 +567,9 @@ fn test_exp2() { } fn test_frexp() { - for i := 0; i < math.vf_.len; i++ { - f, j := frexp(math.vf_[i]) - assert veryclose(math.frexp_[i].f, f) || math.frexp_[i].i != j + for i := 0; i < vf_.len; i++ { + f, j := frexp(vf_[i]) + assert veryclose(frexp_[i].f, f) || frexp_[i].i != j } // vffrexp_sc_ := [inf(-1), copysign(0, -1), 0, inf(1), nan()] // frexp_sc_ := [Fi{inf(-1), 0}, Fi{copysign(0, -1), 0}, Fi{0, 0}, @@ -646,9 +646,9 @@ fn test_gamma() { } fn test_hypot() { - for i := 0; i < math.vf_.len; i++ { - a := abs(1e+200 * math.tanh_[i] * sqrt(2.0)) - f := hypot(1e+200 * math.tanh_[i], 1e+200 * math.tanh_[i]) + for i := 0; i < vf_.len; i++ { + a := abs(1e+200 * tanh_[i] * sqrt(2.0)) + f := hypot(1e+200 * tanh_[i], 1e+200 * tanh_[i]) assert veryclose(a, f) } vfhypot_sc_ := [[inf(-1), inf(-1)], [inf(-1), 0], [inf(-1), @@ -669,9 +669,9 @@ fn test_hypot() { } fn test_ldexp() { - for i := 0; i < 
math.vf_.len; i++ { - f := ldexp(math.frexp_[i].f, math.frexp_[i].i) - assert veryclose(math.vf_[i], f) + for i := 0; i < vf_.len; i++ { + f := ldexp(frexp_[i].f, frexp_[i].i) + assert veryclose(vf_[i], f) } vffrexp_sc_ := [inf(-1), copysign(0, -1), 0, inf(1), nan()] frexp_sc_ := [Fi{inf(-1), 0}, Fi{copysign(0, -1), 0}, Fi{0, 0}, @@ -695,9 +695,9 @@ fn test_ldexp() { } fn test_log_gamma() { - for i := 0; i < math.vf_.len; i++ { - f, s := log_gamma_sign(math.vf_[i]) - assert soclose(math.log_gamma_[i].f, f, 1e-6) && math.log_gamma_[i].i == s + for i := 0; i < vf_.len; i++ { + f, s := log_gamma_sign(vf_[i]) + assert soclose(log_gamma_[i].f, f, 1e-6) && log_gamma_[i].i == s } // vflog_gamma_sc_ := [inf(-1), -3, 0, 1, 2, inf(1), nan()] // log_gamma_sc_ := [Fi{inf(-1), 1}, Fi{inf(1), 1}, Fi{inf(1), 1}, @@ -709,10 +709,10 @@ fn test_log_gamma() { } fn test_log() { - for i := 0; i < math.vf_.len; i++ { - a := abs(math.vf_[i]) + for i := 0; i < vf_.len; i++ { + a := abs(vf_[i]) f := log(a) - assert math.log_[i] == f + assert log_[i] == f } vflog_sc_ := [inf(-1), -pi, copysign(0, -1), 0, 1, inf(1), nan()] @@ -726,10 +726,10 @@ fn test_log() { } fn test_log10() { - for i := 0; i < math.vf_.len; i++ { - a := abs(math.vf_[i]) + for i := 0; i < vf_.len; i++ { + a := abs(vf_[i]) f := log10(a) - assert veryclose(math.log10_[i], f) + assert veryclose(log10_[i], f) } vflog_sc_ := [inf(-1), -pi, copysign(0, -1), 0, 1, inf(1), nan()] @@ -741,9 +741,9 @@ fn test_log10() { } fn test_pow() { - for i := 0; i < math.vf_.len; i++ { - f := pow(10, math.vf_[i]) - assert close(math.pow_[i], f) + for i := 0; i < vf_.len; i++ { + f := pow(10, vf_[i]) + assert close(pow_[i], f) } vfpow_sc_ := [[inf(-1), -pi], [inf(-1), -3], [inf(-1), -0.0], [inf(-1), 0], [inf(-1), 1], [inf(-1), 3], [inf(-1), pi], @@ -843,9 +843,9 @@ fn test_pow() { } fn test_round() { - for i := 0; i < math.vf_.len; i++ { - f := round(math.vf_[i]) - assert alike(math.round_[i], f) + for i := 0; i < vf_.len; i++ { + f := 
round(vf_[i]) + assert alike(round_[i], f) } vfround_sc_ := [[f64(0), 0], [nan(), nan()], [inf(1), inf(1)]] // vfround_even_sc_ := [[f64(0), 0], [f64(1.390671161567e-309), 0], // denormal @@ -875,9 +875,9 @@ fn fn_test_round_sig() { } fn test_sin() { - for i := 0; i < math.vf_.len; i++ { - f := sin(math.vf_[i]) - assert veryclose(math.sin_[i], f) + for i := 0; i < vf_.len; i++ { + f := sin(vf_[i]) + assert veryclose(sin_[i], f) } vfsin_sc_ := [inf(-1), copysign(0, -1), 0, inf(1), nan()] sin_sc_ := [nan(), copysign(0, -1), 0, nan(), nan()] @@ -888,10 +888,10 @@ fn test_sin() { } fn test_sincos() { - for i := 0; i < math.vf_.len; i++ { - f, g := sincos(math.vf_[i]) - assert veryclose(math.sin_[i], f) - assert veryclose(math.cos_[i], g) + for i := 0; i < vf_.len; i++ { + f, g := sincos(vf_[i]) + assert veryclose(sin_[i], f) + assert veryclose(cos_[i], g) } vfsin_sc_ := [inf(-1), copysign(0, -1), 0, inf(1), nan()] sin_sc_ := [nan(), copysign(0, -1), 0, nan(), nan()] @@ -908,9 +908,9 @@ fn test_sincos() { } fn test_sinh() { - for i := 0; i < math.vf_.len; i++ { - f := sinh(math.vf_[i]) - assert close(math.sinh_[i], f) + for i := 0; i < vf_.len; i++ { + f := sinh(vf_[i]) + assert close(sinh_[i], f) } vfsinh_sc_ := [inf(-1), copysign(0, -1), 0, inf(1), nan()] sinh_sc_ := [inf(-1), copysign(0, -1), 0, inf(1), nan()] @@ -921,13 +921,13 @@ fn test_sinh() { } fn test_sqrt() { - for i := 0; i < math.vf_.len; i++ { - mut a := abs(math.vf_[i]) + for i := 0; i < vf_.len; i++ { + mut a := abs(vf_[i]) mut f := sqrt(a) - assert veryclose(math.sqrt_[i], f) - a = abs(math.vf_[i]) + assert veryclose(sqrt_[i], f) + a = abs(vf_[i]) f = sqrt(a) - assert veryclose(math.sqrt_[i], f) + assert veryclose(sqrt_[i], f) } vfsqrt_sc_ := [inf(-1), -pi, copysign(0, -1), 0, inf(1), nan()] sqrt_sc_ := [nan(), nan(), copysign(0, -1), 0, inf(1), nan()] @@ -940,9 +940,9 @@ fn test_sqrt() { } fn test_tan() { - for i := 0; i < math.vf_.len; i++ { - f := tan(math.vf_[i]) - assert veryclose(math.tan_[i], f) 
+ for i := 0; i < vf_.len; i++ { + f := tan(vf_[i]) + assert veryclose(tan_[i], f) } vfsin_sc_ := [inf(-1), copysign(0, -1), 0, inf(1), nan()] sin_sc_ := [nan(), copysign(0, -1), 0, nan(), nan()] @@ -954,9 +954,9 @@ fn test_tan() { } fn test_tanh() { - for i := 0; i < math.vf_.len; i++ { - f := tanh(math.vf_[i]) - assert veryclose(math.tanh_[i], f) + for i := 0; i < vf_.len; i++ { + f := tanh(vf_[i]) + assert veryclose(tanh_[i], f) } vftanh_sc_ := [inf(-1), copysign(0, -1), 0, inf(1), nan()] tanh_sc_ := [f64(-1), copysign(0, -1), 0, 1, nan()] @@ -1049,27 +1049,27 @@ fn test_digits() { // a multiple of 2 * pi, is misleading.] fn test_large_cos() { large := 100000.0 * pi - for i := 0; i < math.vf_.len; i++ { - f1 := math.cos_large_[i] - f2 := cos(math.vf_[i] + large) + for i := 0; i < vf_.len; i++ { + f1 := cos_large_[i] + f2 := cos(vf_[i] + large) assert soclose(f1, f2, 4e-8) } } fn test_large_sin() { large := 100000.0 * pi - for i := 0; i < math.vf_.len; i++ { - f1 := math.sin_large_[i] - f2 := sin(math.vf_[i] + large) + for i := 0; i < vf_.len; i++ { + f1 := sin_large_[i] + f2 := sin(vf_[i] + large) assert soclose(f1, f2, 4e-9) } } fn test_large_tan() { large := 100000.0 * pi - for i := 0; i < math.vf_.len; i++ { - f1 := math.tan_large_[i] - f2 := tan(math.vf_[i] + large) + for i := 0; i < vf_.len; i++ { + f1 := tan_large_[i] + f2 := tan(vf_[i] + large) assert soclose(f1, f2, 4e-8) } } diff --git a/vlib/math/modf.v b/vlib/math/modf.v index 5421c5fb558df3..d301d8e7290890 100644 --- a/vlib/math/modf.v +++ b/vlib/math/modf.v @@ -11,11 +11,11 @@ const modf_maxpowtwo = 4.503599627370496000e+15 pub fn modf(f f64) (f64, f64) { abs_f := abs(f) mut i := 0.0 - if abs_f >= math.modf_maxpowtwo { + if abs_f >= modf_maxpowtwo { i = f // it must be an integer } else { - i = abs_f + math.modf_maxpowtwo // shift fraction off right - i -= math.modf_maxpowtwo // shift back without fraction + i = abs_f + modf_maxpowtwo // shift fraction off right + i -= modf_maxpowtwo // shift back 
without fraction for i > abs_f { // above arithmetic might round i -= 1.0 // test again just to be sure } diff --git a/vlib/math/pow.v b/vlib/math/pow.v index 093021215014c0..e1663c1e80b3cb 100644 --- a/vlib/math/pow.v +++ b/vlib/math/pow.v @@ -21,10 +21,10 @@ pub fn powf(a f32, b f32) f32 { // pow10(n) = +inf for n > 308 pub fn pow10(n int) f64 { if 0 <= n && n <= 308 { - return math.pow10postab32[u32(n) / 32] * math.pow10tab[u32(n) % 32] + return pow10postab32[u32(n) / 32] * pow10tab[u32(n) % 32] } if -323 <= n && n <= 0 { - return math.pow10negtab32[u32(-n) / 32] / math.pow10tab[u32(-n) % 32] + return pow10negtab32[u32(-n) / 32] / pow10tab[u32(-n) % 32] } // n < -323 || 308 < n if n > 0 { diff --git a/vlib/math/sin.v b/vlib/math/sin.v index c692e61ddabf9f..bed26b6b8e26e6 100644 --- a/vlib/math/sin.v +++ b/vlib/math/sin.v @@ -69,11 +69,11 @@ pub fn sin(x f64) f64 { mut result := 0.0 if octant == 0 { t := 8.0 * abs(z) / pi - 1.0 - sin_cs_val, _ := math.sin_cs.eval_e(t) + sin_cs_val, _ := sin_cs.eval_e(t) result = z * (1.0 + z * z * sin_cs_val) } else { t := 8.0 * abs(z) / pi - 1.0 - cos_cs_val, _ := math.cos_cs.eval_e(t) + cos_cs_val, _ := cos_cs.eval_e(t) result = 1.0 - 0.5 * z * z * (1.0 - z * z * cos_cs_val) } result *= sgn_result @@ -110,11 +110,11 @@ pub fn cos(x f64) f64 { mut result := 0.0 if octant == 0 { t := 8.0 * abs(z) / pi - 1.0 - cos_cs_val, _ := math.cos_cs.eval_e(t) + cos_cs_val, _ := cos_cs.eval_e(t) result = 1.0 - 0.5 * z * z * (1.0 - z * z * cos_cs_val) } else { t := 8.0 * abs(z) / pi - 1.0 - sin_cs_val, _ := math.sin_cs.eval_e(t) + sin_cs_val, _ := sin_cs.eval_e(t) result = z * (1.0 + z * z * sin_cs_val) } result *= sgn_result @@ -168,8 +168,8 @@ pub fn sincos(x f64) (f64, f64) { sgn_result_cos = if octant > 1 { -sgn_result_cos } else { sgn_result_cos } z := ((abs_x - y * p1) - y * p2) - y * p3 t := 8.0 * abs(z) / pi - 1.0 - sin_cs_val, _ := math.sin_cs.eval_e(t) - cos_cs_val, _ := math.cos_cs.eval_e(t) + sin_cs_val, _ := sin_cs.eval_e(t) + 
cos_cs_val, _ := cos_cs.eval_e(t) mut result_sin := 0.0 mut result_cos := 0.0 if octant == 0 { diff --git a/vlib/math/tan.v b/vlib/math/tan.v index f1ba3b15e4027c..17419ea4281264 100644 --- a/vlib/math/tan.v +++ b/vlib/math/tan.v @@ -31,7 +31,7 @@ pub fn tan(a f64) f64 { x = -x sign = -1 } - if x > math.tan_lossth { + if x > tan_lossth { return 0.0 } // compute x mod pi_4 @@ -44,11 +44,11 @@ pub fn tan(a f64) f64 { octant++ y += 1.0 } - z = ((x - y * math.tan_dp1) - y * math.tan_dp2) - y * math.tan_dp3 + z = ((x - y * tan_dp1) - y * tan_dp2) - y * tan_dp3 zz := z * z if zz > 1.0e-14 { - y = z + z * (zz * (((math.tan_p[0] * zz) + math.tan_p[1]) * zz + math.tan_p[2]) / ((((zz + - math.tan_q[1]) * zz + math.tan_q[2]) * zz + math.tan_q[3]) * zz + math.tan_q[4])) + y = z + z * (zz * (((tan_p[0] * zz) + tan_p[1]) * zz + tan_p[2]) / ((((zz + tan_q[1]) * zz + + tan_q[2]) * zz + tan_q[3]) * zz + tan_q[4])) } else { y = z } @@ -78,7 +78,7 @@ pub fn cot(a f64) f64 { x = -x sign = -1 } - if x > math.tan_lossth { + if x > tan_lossth { return 0.0 } // compute x mod pi_4 @@ -91,11 +91,11 @@ pub fn cot(a f64) f64 { octant++ y += 1.0 } - z = ((x - y * math.tan_dp1) - y * math.tan_dp2) - y * math.tan_dp3 + z = ((x - y * tan_dp1) - y * tan_dp2) - y * tan_dp3 zz := z * z if zz > 1.0e-14 { - y = z + z * (zz * (((math.tan_p[0] * zz) + math.tan_p[1]) * zz + math.tan_p[2]) / ((((zz + - math.tan_q[1]) * zz + math.tan_q[2]) * zz + math.tan_q[3]) * zz + math.tan_q[4])) + y = z + z * (zz * (((tan_p[0] * zz) + tan_p[1]) * zz + tan_p[2]) / ((((zz + tan_q[1]) * zz + + tan_q[2]) * zz + tan_q[3]) * zz + tan_q[4])) } else { y = z } diff --git a/vlib/math/tanh.v b/vlib/math/tanh.v index 26dd790a15b92b..4e3893fdbbda0f 100644 --- a/vlib/math/tanh.v +++ b/vlib/math/tanh.v @@ -36,8 +36,8 @@ pub fn tanh(x f64) f64 { return x } s := x * x - z = x + x * s * ((math.tanh_p[0] * s + math.tanh_p[1]) * s + math.tanh_p[2]) / (((s + - math.tanh_q[0]) * s + math.tanh_q[1]) * s + math.tanh_q[2]) + z = x + x * s * 
((tanh_p[0] * s + tanh_p[1]) * s + tanh_p[2]) / (((s + tanh_q[0]) * s + + tanh_q[1]) * s + tanh_q[2]) } return z } diff --git a/vlib/math/unsigned/uint128.v b/vlib/math/unsigned/uint128.v index 3195713d8a6a29..3b0c9da56345f8 100644 --- a/vlib/math/unsigned/uint128.v +++ b/vlib/math/unsigned/uint128.v @@ -426,7 +426,7 @@ pub fn uint128_new(lo u64, hi u64) Uint128 { // unint_from_dec_str returns an error or new Uint128 from given string. // The `_` character is allowed as a separator. pub fn uint128_from_dec_str(value string) !Uint128 { - mut res := unsigned.uint128_zero + mut res := uint128_zero underscore := `_` for b_ in value { diff --git a/vlib/math/unsigned/uint256.v b/vlib/math/unsigned/uint256.v index 6f270d20470435..058a06dbee783a 100644 --- a/vlib/math/unsigned/uint256.v +++ b/vlib/math/unsigned/uint256.v @@ -389,7 +389,7 @@ pub fn (u_ Uint256) str() string { // uint256_from_dec_str creates a new `unsigned.Uint256` from the given string if possible // The `_` character is allowed as a separator. pub fn uint256_from_dec_str(value string) !Uint256 { - mut res := unsigned.uint256_zero + mut res := uint256_zero underscore := `_` for b_ in value { diff --git a/vlib/math/vec/vec2.v b/vlib/math/vec/vec2.v index f24e5340d31fae..2a4b045aae9506 100644 --- a/vlib/math/vec/vec2.v +++ b/vlib/math/vec/vec2.v @@ -266,7 +266,7 @@ pub fn (v Vec2[T]) eq(u Vec2[T]) bool { // eq_epsilon returns a bool indicating if the two vectors are equal within the module `vec_epsilon` const. pub fn (v Vec2[T]) eq_epsilon(u Vec2[T]) bool { - return v.eq_approx[T, f32](u, vec.vec_epsilon) + return v.eq_approx[T, f32](u, vec_epsilon) } // eq_approx returns whether these vectors are approximately equal within `tolerance`. 
diff --git a/vlib/net/address.c.v b/vlib/net/address.c.v index 2658c3c4718ad2..202742dd97d6fe 100644 --- a/vlib/net/address.c.v +++ b/vlib/net/address.c.v @@ -87,7 +87,7 @@ const max_ip6_len = 46 // str returns a string representation of `a` pub fn (a Ip) str() string { - buf := [net.max_ip_len]char{} + buf := [max_ip_len]char{} res := &char(C.inet_ntop(.ip, &a.addr, &buf[0], buf.len)) @@ -102,7 +102,7 @@ pub fn (a Ip) str() string { // str returns a string representation of `a` pub fn (a Ip6) str() string { - buf := [net.max_ip6_len]char{} + buf := [max_ip6_len]char{} res := &char(C.inet_ntop(.ip6, &a.addr, &buf[0], buf.len)) @@ -121,13 +121,13 @@ const aoffset = __offsetof(Addr, addr) pub fn (a Addr) len() u32 { match a.family() { .ip { - return sizeof(Ip) + net.aoffset + return sizeof(Ip) + aoffset } .ip6 { - return sizeof(Ip6) + net.aoffset + return sizeof(Ip6) + aoffset } .unix { - return sizeof(Unix) + net.aoffset + return sizeof(Unix) + aoffset } else { panic('Unknown address family') @@ -190,10 +190,10 @@ pub fn resolve_ipaddrs(addr string, family AddrFamily, typ SocketType) ![]Addr { if addr[0] == `:` { match family { .ip6 { - return [new_ip6(port, net.addr_ip6_any)] + return [new_ip6(port, addr_ip6_any)] } .ip, .unspec { - return [new_ip(port, net.addr_ip_any)] + return [new_ip(port, addr_ip_any)] } else {} } diff --git a/vlib/net/common.c.v b/vlib/net/common.c.v index 950c4356fe9f58..58f084cee9acea 100644 --- a/vlib/net/common.c.v +++ b/vlib/net/common.c.v @@ -124,7 +124,7 @@ fn @select(handle int, test Select, timeout time.Duration) !bool { // infinite timeout is signaled by passing null as the timeout to // select - if timeout == net.infinite_timeout { + if timeout == infinite_timeout { timeval_timeout = &C.timeval(unsafe { nil }) } @@ -148,7 +148,7 @@ fn select_deadline(handle int, test Select, deadline time.Time) !bool { // if we have a 0 deadline here then the timeout that was passed was infinite... 
infinite := deadline.unix() == 0 for infinite || time.now() <= deadline { - timeout := if infinite { net.infinite_timeout } else { deadline - time.now() } + timeout := if infinite { infinite_timeout } else { deadline - time.now() } ready := @select(handle, test, timeout) or { if err.code() == C.EINTR { // errno is 4, Spurious wakeup from signal, keep waiting @@ -169,7 +169,7 @@ fn select_deadline(handle int, test Select, deadline time.Time) !bool { // wait_for_common wraps the common wait code fn wait_for_common(handle int, deadline time.Time, timeout time.Duration, test Select) ! { // Convert timeouts to deadlines - real_deadline := if timeout == net.infinite_timeout { + real_deadline := if timeout == infinite_timeout { time.unix(0) } else if timeout == 0 { // No timeout set, so assume deadline diff --git a/vlib/net/ftp/ftp.v b/vlib/net/ftp/ftp.v index a18fc23f2b57c1..16edce9b4e3830 100644 --- a/vlib/net/ftp/ftp.v +++ b/vlib/net/ftp/ftp.v @@ -105,7 +105,7 @@ pub fn (mut zftp FTP) connect(oaddress string) !bool { zftp.conn = net.dial_tcp(oaddress)! zftp.reader = io.new_buffered_reader(reader: zftp.conn) code, _ := zftp.read()! - if code == ftp.connected { + if code == connected { return true } return false @@ -120,10 +120,10 @@ pub fn (mut zftp FTP) login(user string, passwd string) !bool { return false } mut code, _ := zftp.read()! - if code == ftp.logged_in { + if code == logged_in { return true } - if code != ftp.specify_password { + if code != specify_password { return false } zftp.write('PASS ${passwd}') or { @@ -133,7 +133,7 @@ pub fn (mut zftp FTP) login(user string, passwd string) !bool { return false } code, _ = zftp.read()! - if code == ftp.logged_in { + if code == logged_in { return true } return false @@ -161,12 +161,12 @@ pub fn (mut zftp FTP) cd(dir string) ! { zftp.write('CWD ${dir}') or { return } mut code, mut data := zftp.read()! 
match int(code) { - ftp.denied { + denied { $if debug { println('CD ${dir} denied!') } } - ftp.complete { + complete { code, data = zftp.read()! } else {} @@ -198,7 +198,7 @@ fn (mut zftp FTP) pasv() !&DTP { $if debug { println('pass: ${data}') } - if code != ftp.passive_mode { + if code != passive_mode { return error('passive mode not allowed') } dtp := new_dtp(data)! @@ -210,15 +210,15 @@ pub fn (mut zftp FTP) dir() ![]string { mut dtp := zftp.pasv() or { return error('Cannot establish data connection') } zftp.write('LIST')! code, _ := zftp.read()! - if code == ftp.denied { + if code == denied { return error('`LIST` denied') } - if code != ftp.open_data_connection { + if code != open_data_connection { return error('Data channel empty') } list_dir := dtp.read()! result, _ := zftp.read()! - if result != ftp.close_data_connection { + if result != close_data_connection { println('`LIST` not ok') } dtp.close() @@ -243,10 +243,10 @@ pub fn (mut zftp FTP) get(file string) ![]u8 { mut dtp := zftp.pasv() or { return error('Cannot stablish data connection') } zftp.write('RETR ${file}')! code, _ := zftp.read()! - if code == ftp.denied { + if code == denied { return error('Permission denied') } - if code != ftp.open_data_connection { + if code != open_data_connection { return error('Data connection not ready') } blob := dtp.read()! 
diff --git a/vlib/net/html/data_structures.v b/vlib/net/html/data_structures.v index 788682dbe1735d..64eab41d04d7ed 100644 --- a/vlib/net/html/data_structures.v +++ b/vlib/net/html/data_structures.v @@ -10,7 +10,7 @@ mut: @[inline] fn is_null(data int) bool { - return data == html.null_element + return data == null_element } @[inline] @@ -19,11 +19,11 @@ fn (stack Stack) is_empty() bool { } fn (stack Stack) peek() int { - return if !stack.is_empty() { stack.elements[stack.size - 1] } else { html.null_element } + return if !stack.is_empty() { stack.elements[stack.size - 1] } else { null_element } } fn (mut stack Stack) pop() int { - mut to_return := html.null_element + mut to_return := null_element if !stack.is_empty() { to_return = stack.elements[stack.size - 1] stack.size-- diff --git a/vlib/net/html/tag_test.v b/vlib/net/html/tag_test.v index 6ffe2ae67d29c6..57c57f3859d55a 100644 --- a/vlib/net/html/tag_test.v +++ b/vlib/net/html/tag_test.v @@ -29,7 +29,7 @@ const html = ' ' fn test_search_tag_by_type() { - mut dom := parse(html.html) + mut dom := parse(html) tag := dom.get_tags(name: 'body')[0] assert tag.get_tag('div')?.attributes['id'] == '1st' assert tag.get_tag_by_attribute('href')?.content == 'V' @@ -39,7 +39,7 @@ fn test_search_tag_by_type() { } fn test_search_tags_by_type() { - mut dom := parse(html.html) + mut dom := parse(html) tag := dom.get_tags_by_attribute_value('id', '2nd')[0] assert tag.get_tags('div').len == 5 assert tag.get_tags_by_attribute('href')[2].content == 'vpm' diff --git a/vlib/net/http/download_progress.v b/vlib/net/http/download_progress.v index f8c9d87e6dd0e1..5fe4a63668c441 100644 --- a/vlib/net/http/download_progress.v +++ b/vlib/net/http/download_progress.v @@ -65,7 +65,7 @@ const zz = &Downloader(unsafe { nil }) fn download_progres_cb(request &Request, chunk []u8, body_so_far u64, expected_size u64, status_code int) ! 
{ // TODO: remove this hack, when `unsafe { &Downloader( request.user_ptr ) }` works reliably, // by just casting, without trying to promote the argument to the heap at all. - mut d := unsafe { http.zz } + mut d := unsafe { zz } pd := unsafe { &voidptr(&d) } unsafe { *pd = request.user_ptr diff --git a/vlib/net/http/file/folder_index.v b/vlib/net/http/file/folder_index.v index 0d63971ebe991e..d3a95549c437d1 100644 --- a/vlib/net/http/file/folder_index.v +++ b/vlib/net/http/file/folder_index.v @@ -11,7 +11,7 @@ fn get_folder_index_html(requested_file_path string, uri_path string, filter_mye sw := time.new_stopwatch() mut files := os.ls(requested_file_path) or { [] } if filter_myexe { - files = files.filter(!it.contains(file.myexe_prefix)) + files = files.filter(!it.contains(myexe_prefix)) } mut sb := strings.new_builder(files.len * 200) write_page_header(mut sb, uri_path) diff --git a/vlib/net/http/file/static_server.v b/vlib/net/http/file/static_server.v index ec895fd97e4e91..48cd8c6e1e43b4 100644 --- a/vlib/net/http/file/static_server.v +++ b/vlib/net/http/file/static_server.v @@ -78,7 +78,7 @@ fn (mut h StaticHttpHandler) handle(req http.Request) http.Response { } if !os.exists(requested_file_path) { res.set_status(.not_found) - res.body = file.no_such_file_doc + res.body = no_such_file_doc res.header.add(.content_type, 'text/html; charset=utf-8') return res } @@ -98,7 +98,7 @@ fn (mut h StaticHttpHandler) handle(req http.Request) http.Response { } else { body = os.read_file(ipath) or { res.set_status(.not_found) - file.no_such_file_doc + no_such_file_doc } } } else { diff --git a/vlib/net/http/header.v b/vlib/net/http/header.v index 4095c3980031f2..7e8c70c1f47c8c 100644 --- a/vlib/net/http/header.v +++ b/vlib/net/http/header.v @@ -508,7 +508,7 @@ pub fn (mut h Header) coerce(flags HeaderCoerceConfig) { if keys.len == new_keys.len { return } - mut new_data := [http.max_headers]HeaderKV{} + mut new_data := [max_headers]HeaderKV{} mut i := 0 for _, key in new_keys 
{ for _, old_key in keys { @@ -756,8 +756,8 @@ pub fn (h Header) join(other Header) Header { // NOTE: Assumes sl is lowercase, since the caller usually already has the lowercase key fn canonicalize(name string) string { // check if we have a common header - if name in http.common_header_map { - return http.common_header_map[name].str() + if name in common_header_map { + return common_header_map[name].str() } return name.split('-').map(it.capitalize()).join('-') } diff --git a/vlib/net/http/http.v b/vlib/net/http/http.v index a58cf08eadf1de..213c14f4898cf1 100644 --- a/vlib/net/http/http.v +++ b/vlib/net/http/http.v @@ -70,7 +70,7 @@ pub fn post(url string, data string) !Response { method: .post url: url data: data - header: new_header(key: .content_type, value: http.content_type_default) + header: new_header(key: .content_type, value: content_type_default) ) } @@ -134,7 +134,7 @@ pub fn put(url string, data string) !Response { method: .put url: url data: data - header: new_header(key: .content_type, value: http.content_type_default) + header: new_header(key: .content_type, value: content_type_default) ) } @@ -144,7 +144,7 @@ pub fn patch(url string, data string) !Response { method: .patch url: url data: data - header: new_header(key: .content_type, value: http.content_type_default) + header: new_header(key: .content_type, value: content_type_default) ) } diff --git a/vlib/net/http/http_proxy_test.v b/vlib/net/http/http_proxy_test.v index 2730298685561d..01660f16afe860 100644 --- a/vlib/net/http/http_proxy_test.v +++ b/vlib/net/http/http_proxy_test.v @@ -12,34 +12,34 @@ const sample_request = &Request{ const sample_path = '/' fn test_proxy_fields() ? { - sample_proxy := new_http_proxy(http.sample_proxy_url)! - sample_auth_proxy := new_http_proxy(http.sample_auth_proxy_url)! + sample_proxy := new_http_proxy(sample_proxy_url)! + sample_auth_proxy := new_http_proxy(sample_auth_proxy_url)! 
assert sample_proxy.scheme == 'https' assert sample_proxy.host == 'localhost:443' assert sample_proxy.hostname == 'localhost' assert sample_proxy.port == 443 - assert sample_proxy.url == http.sample_proxy_url + assert sample_proxy.url == sample_proxy_url assert sample_auth_proxy.scheme == 'http' assert sample_auth_proxy.username == 'user' assert sample_auth_proxy.password == 'pass' assert sample_auth_proxy.host == 'localhost:8888' assert sample_auth_proxy.hostname == 'localhost' assert sample_auth_proxy.port == 8888 - assert sample_auth_proxy.url == http.sample_auth_proxy_url + assert sample_auth_proxy.url == sample_auth_proxy_url } fn test_proxy_headers() ? { - sample_proxy := new_http_proxy(http.sample_proxy_url)! - headers := sample_proxy.build_proxy_headers(http.sample_host) + sample_proxy := new_http_proxy(sample_proxy_url)! + headers := sample_proxy.build_proxy_headers(sample_host) assert headers == 'CONNECT 127.0.0.1:1337 HTTP/1.1\r\n' + 'Host: 127.0.0.1\r\n' + 'Proxy-Connection: Keep-Alive\r\n\r\n' } fn test_proxy_headers_authenticated() ? { - sample_proxy := new_http_proxy(http.sample_auth_proxy_url)! - headers := sample_proxy.build_proxy_headers(http.sample_host) + sample_proxy := new_http_proxy(sample_auth_proxy_url)! 
+ headers := sample_proxy.build_proxy_headers(sample_host) auth_token := base64.encode(('${sample_proxy.username}:' + '${sample_proxy.password}').bytes()) diff --git a/vlib/net/http/server.v b/vlib/net/http/server.v index f3c2749c26e92a..2fac7a93c3d8e9 100644 --- a/vlib/net/http/server.v +++ b/vlib/net/http/server.v @@ -29,8 +29,8 @@ pub struct Server { mut: state ServerStatus = .closed pub mut: - addr string = ':${http.default_server_port}' - port int = http.default_server_port @[deprecated: 'use addr'] + addr string = ':${default_server_port}' + port int = default_server_port @[deprecated: 'use addr'] handler Handler = DebugHandler{} read_timeout time.Duration = 30 * time.second write_timeout time.Duration = 30 * time.second @@ -55,7 +55,7 @@ pub fn (mut s Server) listen_and_serve() { // remove when s.port is removed addr := s.addr.split(':') - if addr.len > 1 && s.port != http.default_server_port { + if addr.len > 1 && s.port != default_server_port { s.addr = '${addr[0]}:${s.port}' } diff --git a/vlib/net/mbedtls/ssl_connection.c.v b/vlib/net/mbedtls/ssl_connection.c.v index c15e3cf897c11f..46eacddd9bf4c6 100644 --- a/vlib/net/mbedtls/ssl_connection.c.v +++ b/vlib/net/mbedtls/ssl_connection.c.v @@ -13,12 +13,12 @@ fn init() { eprintln(@METHOD) } unsafe { // Unsafe is needed for taking an address of const - C.mbedtls_ctr_drbg_init(&mbedtls.ctr_drbg) - C.mbedtls_entropy_init(&mbedtls.entropy) - ret := C.mbedtls_ctr_drbg_seed(&mbedtls.ctr_drbg, C.mbedtls_entropy_func, &mbedtls.entropy, - 0, 0) + C.mbedtls_ctr_drbg_init(&ctr_drbg) + C.mbedtls_entropy_init(&entropy) + ret := C.mbedtls_ctr_drbg_seed(&ctr_drbg, C.mbedtls_entropy_func, &entropy, 0, + 0) if ret != 0 { - C.mbedtls_ctr_drbg_free(&mbedtls.ctr_drbg) + C.mbedtls_ctr_drbg_free(&ctr_drbg) panic('Failed to seed ssl context: ${ret}') } } @@ -59,7 +59,7 @@ pub fn new_sslcerts_in_memory(verify string, cert string, cert_key string) !&SSL if cert_key != '' { unsafe { ret := C.mbedtls_pk_parse_key(&certs.client_key, 
cert_key.str, cert_key.len + 1, - 0, 0, C.mbedtls_ctr_drbg_random, &mbedtls.ctr_drbg) + 0, 0, C.mbedtls_ctr_drbg_random, &ctr_drbg) if ret != 0 { return error_with_code('v error', ret) } @@ -86,7 +86,7 @@ pub fn new_sslcerts_from_file(verify string, cert string, cert_key string) !&SSL if cert_key != '' { unsafe { ret := C.mbedtls_pk_parse_keyfile(&certs.client_key, &char(cert_key.str), - 0, C.mbedtls_ctr_drbg_random, &mbedtls.ctr_drbg) + 0, C.mbedtls_ctr_drbg_random, &ctr_drbg) if ret != 0 { return error_with_code('v error', ret) } @@ -179,7 +179,7 @@ fn (mut l SSLListener) init() ! { C.mbedtls_pk_init(&l.certs.client_key) unsafe { - C.mbedtls_ssl_conf_rng(&l.conf, C.mbedtls_ctr_drbg_random, &mbedtls.ctr_drbg) + C.mbedtls_ssl_conf_rng(&l.conf, C.mbedtls_ctr_drbg_random, &ctr_drbg) } mut ret := 0 @@ -362,7 +362,7 @@ fn (mut s SSLConn) init() ! { } unsafe { - C.mbedtls_ssl_conf_rng(&s.conf, C.mbedtls_ctr_drbg_random, &mbedtls.ctr_drbg) + C.mbedtls_ssl_conf_rng(&s.conf, C.mbedtls_ctr_drbg_random, &ctr_drbg) } if s.config.verify != '' || s.config.cert != '' || s.config.cert_key != '' { @@ -384,7 +384,7 @@ fn (mut s SSLConn) init() ! { if s.config.cert_key != '' { unsafe { ret = C.mbedtls_pk_parse_key(&s.certs.client_key, s.config.cert_key.str, - s.config.cert_key.len + 1, 0, 0, C.mbedtls_ctr_drbg_random, &mbedtls.ctr_drbg) + s.config.cert_key.len + 1, 0, 0, C.mbedtls_ctr_drbg_random, &ctr_drbg) } } } else { @@ -397,7 +397,7 @@ fn (mut s SSLConn) init() ! 
{ if s.config.cert_key != '' { unsafe { ret = C.mbedtls_pk_parse_keyfile(&s.certs.client_key, &char(s.config.cert_key.str), - 0, C.mbedtls_ctr_drbg_random, &mbedtls.ctr_drbg) + 0, C.mbedtls_ctr_drbg_random, &ctr_drbg) } } } diff --git a/vlib/net/net_windows.c.v b/vlib/net/net_windows.c.v index 4a21ebabf85977..da9c9705319a81 100644 --- a/vlib/net/net_windows.c.v +++ b/vlib/net/net_windows.c.v @@ -230,7 +230,7 @@ fn init() { mut wsadata := C.WSAData{ lpVendorInfo: 0 } - res := C.WSAStartup(net.wsa_v22, &wsadata) + res := C.WSAStartup(wsa_v22, &wsadata) if res != 0 { panic('socket: WSAStartup failed') } diff --git a/vlib/net/socks/socks5.v b/vlib/net/socks/socks5.v index 47556620b0bfce..d5926a980df032 100644 --- a/vlib/net/socks/socks5.v +++ b/vlib/net/socks/socks5.v @@ -66,18 +66,18 @@ pub fn (sd SOCKS5Dialer) dial(address string) !net.Connection { } fn handshake(mut con net.Connection, host string, username string, password string) !net.Connection { - mut v := [socks.socks_version5, 1] + mut v := [socks_version5, 1] if username.len > 0 { - v << socks.auth_user_password + v << auth_user_password } else { - v << socks.no_auth + v << no_auth } con.write(v)! mut bf := []u8{len: 2} con.read(mut bf)! - if bf[0] != socks.socks_version5 { + if bf[0] != socks_version5 { con.close()! return error('unexpected protocol version ${bf[0]}') } @@ -108,7 +108,7 @@ fn handshake(mut con net.Connection, host string, username string, password stri } } v.clear() - v = [socks.socks_version5, 1, 0] + v = [socks_version5, 1, 0] mut port := host.all_after_last(':').u64() if port == 0 { @@ -117,7 +117,7 @@ fn handshake(mut con net.Connection, host string, username string, password stri address := host.all_before_last(':') if address.contains_only('.1234567890') { // ipv4 - v << socks.addr_type_ipv4 + v << addr_type_ipv4 v << parse_ipv4(address)! 
} else if address.contains_only(':1234567890abcdf') { // v << addr_type_ipv6 @@ -127,7 +127,7 @@ fn handshake(mut con net.Connection, host string, username string, password stri if address.len > 255 { return error('${address} is too long') } else { - v << socks.addr_type_fqdn + v << addr_type_fqdn v << u8(address.len) v << address.bytes() } diff --git a/vlib/net/tcp.c.v b/vlib/net/tcp.c.v index 735ac6ff3de836..5c5de46da08ee8 100644 --- a/vlib/net/tcp.c.v +++ b/vlib/net/tcp.c.v @@ -66,8 +66,8 @@ pub fn dial_tcp(oaddress string) !&TcpConn { mut conn := &TcpConn{ sock: s - read_timeout: net.tcp_default_read_timeout - write_timeout: net.tcp_default_write_timeout + read_timeout: tcp_default_read_timeout + write_timeout: tcp_default_write_timeout } // The blocking / non-blocking mode is determined before the connection is established. $if net_nonblocking_sockets ? { @@ -114,8 +114,8 @@ pub fn dial_tcp_with_bind(saddr string, laddr string) !&TcpConn { mut conn := &TcpConn{ sock: s - read_timeout: net.tcp_default_read_timeout - write_timeout: net.tcp_default_write_timeout + read_timeout: tcp_default_read_timeout + write_timeout: tcp_default_write_timeout } // The blocking / non-blocking mode is determined before the connection is established. $if net_nonblocking_sockets ? { @@ -392,7 +392,7 @@ pub fn listen_tcp(family AddrFamily, saddr string, options ListenOptions) !&TcpL for { code := error_code() if code in [int(error_einprogress), int(error_ewouldblock), int(error_eagain), C.EINTR] { - @select(s.handle, .read, net.connect_timeout)! + @select(s.handle, .read, connect_timeout)! res = C.listen(s.handle, options.backlog) if res == 0 { break @@ -449,7 +449,7 @@ pub fn (mut l TcpListener) accept_only() !&TcpConn { } mut new_handle := $if is_coroutine ? 
{ - C.photon_accept(l.sock.handle, 0, 0, net.tcp_default_read_timeout) + C.photon_accept(l.sock.handle, 0, 0, tcp_default_read_timeout) } $else { C.accept(l.sock.handle, 0, 0) } @@ -459,7 +459,7 @@ pub fn (mut l TcpListener) accept_only() !&TcpConn { if code in [int(error_einprogress), int(error_ewouldblock), int(error_eagain), C.EINTR] { l.wait_for_accept()! new_handle = $if is_coroutine ? { - C.photon_accept(l.sock.handle, 0, 0, net.tcp_default_read_timeout) + C.photon_accept(l.sock.handle, 0, 0, tcp_default_read_timeout) } $else { C.accept(l.sock.handle, 0, 0) } @@ -471,8 +471,8 @@ pub fn (mut l TcpListener) accept_only() !&TcpConn { return &TcpConn{ handle: new_handle - read_timeout: net.tcp_default_read_timeout - write_timeout: net.tcp_default_write_timeout + read_timeout: tcp_default_read_timeout + write_timeout: tcp_default_write_timeout is_blocking: l.is_blocking } } @@ -635,7 +635,7 @@ const connect_timeout = 5 * time.second fn (mut s TcpSocket) connect(a Addr) ! { $if net_nonblocking_sockets ? { res := $if is_coroutine ? { - C.photon_connect(s.handle, voidptr(&a), a.len(), net.tcp_default_read_timeout) + C.photon_connect(s.handle, voidptr(&a), a.len(), tcp_default_read_timeout) } $else { C.connect(s.handle, voidptr(&a), a.len()) } @@ -655,7 +655,7 @@ fn (mut s TcpSocket) connect(a Addr) ! { // determine whether connect() completed successfully (SO_ERROR is zero) or // unsuccessfully (SO_ERROR is one of the usual error codes listed here, // ex‐ plaining the reason for the failure). - write_result := @select(s.handle, .write, net.connect_timeout)! + write_result := @select(s.handle, .write, connect_timeout)! err := 0 len := sizeof(err) xyz := C.getsockopt(s.handle, C.SOL_SOCKET, C.SO_ERROR, &err, &len) @@ -675,7 +675,7 @@ fn (mut s TcpSocket) connect(a Addr) ! { return } $else { x := $if is_coroutine ? 
{ - C.photon_connect(s.handle, voidptr(&a), a.len(), net.tcp_default_read_timeout) + C.photon_connect(s.handle, voidptr(&a), a.len(), tcp_default_read_timeout) } $else { C.connect(s.handle, voidptr(&a), a.len()) } diff --git a/vlib/net/tcp_read_line.c.v b/vlib/net/tcp_read_line.c.v index cc04f0e0009031..21d86349b8d0de 100644 --- a/vlib/net/tcp_read_line.c.v +++ b/vlib/net/tcp_read_line.c.v @@ -35,7 +35,7 @@ pub fn (mut con TcpConn) set_blocking(state bool) ! { // Note: if you want more control over the buffer, please use a buffered IO // reader instead: `io.new_buffered_reader({reader: io.make_reader(con)})` pub fn (mut con TcpConn) read_line() string { - return con.read_line_max(net.max_read_line_len) + return con.read_line_max(max_read_line_len) } // read_line_max is a *simple*, *non customizable*, blocking line reader. @@ -46,14 +46,14 @@ pub fn (mut con TcpConn) read_line_max(max_line_len int) string { if !con.is_blocking { con.set_blocking(true) or {} } - mut buf := [net.max_read]u8{} // where C.recv will store the network data - mut res := strings.new_builder(net.max_read) // The final result, including the ending \n. + mut buf := [max_read]u8{} // where C.recv will store the network data + mut res := strings.new_builder(max_read) // The final result, including the ending \n. 
defer { unsafe { res.free() } } bstart := unsafe { &buf[0] } for { - n := C.recv(con.sock.handle, bstart, net.max_read - 1, net.msg_peek | msg_nosignal) + n := C.recv(con.sock.handle, bstart, max_read - 1, msg_peek | msg_nosignal) if n <= 0 { return res.str() } diff --git a/vlib/net/udp.c.v b/vlib/net/udp.c.v index 2d563ed1244453..c167e0236325df 100644 --- a/vlib/net/udp.c.v +++ b/vlib/net/udp.c.v @@ -35,8 +35,8 @@ pub fn dial_udp(raddr string) !&UdpConn { if sock := new_udp_socket_for_remote(addr) { return &UdpConn{ sock: sock - read_timeout: net.udp_default_read_timeout - write_timeout: net.udp_default_write_timeout + read_timeout: udp_default_read_timeout + write_timeout: udp_default_write_timeout } } } @@ -186,8 +186,8 @@ pub fn listen_udp(laddr string) !&UdpConn { addr := addrs[0] return &UdpConn{ sock: new_udp_socket(addr)! - read_timeout: net.udp_default_read_timeout - write_timeout: net.udp_default_write_timeout + read_timeout: udp_default_read_timeout + write_timeout: udp_default_write_timeout } } diff --git a/vlib/net/unix/common.c.v b/vlib/net/unix/common.c.v index 3c5a47e2851470..376fec6a909591 100644 --- a/vlib/net/unix/common.c.v +++ b/vlib/net/unix/common.c.v @@ -47,7 +47,7 @@ fn @select(handle int, test Select, timeout time.Duration) !bool { // infinite timeout is signaled by passing null as the timeout to // select - if timeout == unix.infinite_timeout { + if timeout == infinite_timeout { timeval_timeout = &C.timeval(unsafe { nil }) } diff --git a/vlib/net/unix/stream.c.v b/vlib/net/unix/stream.c.v index 8daa650781aff7..e3b4235eb6b485 100644 --- a/vlib/net/unix/stream.c.v +++ b/vlib/net/unix/stream.c.v @@ -45,8 +45,8 @@ pub fn connect_stream(socket_path string) !&StreamConn { return &StreamConn{ sock: s - read_timeout: unix.unix_default_read_timeout - write_timeout: unix.unix_default_write_timeout + read_timeout: unix_default_read_timeout + write_timeout: unix_default_write_timeout } } @@ -290,14 +290,14 @@ pub fn (mut l StreamListener) accept() 
!&StreamConn { } mut new_handle := $if is_coroutine ? { - C.photon_accept(l.sock.handle, 0, 0, unix.unix_default_read_timeout) + C.photon_accept(l.sock.handle, 0, 0, unix_default_read_timeout) } $else { C.accept(l.sock.handle, 0, 0) } if new_handle <= 0 { l.wait_for_accept()! new_handle = $if is_coroutine ? { - C.photon_accept(l.sock.handle, 0, 0, unix.unix_default_read_timeout) + C.photon_accept(l.sock.handle, 0, 0, unix_default_read_timeout) } $else { C.accept(l.sock.handle, 0, 0) } @@ -308,8 +308,8 @@ pub fn (mut l StreamListener) accept() !&StreamConn { mut c := &StreamConn{ handle: new_handle - read_timeout: unix.unix_default_read_timeout - write_timeout: unix.unix_default_write_timeout + read_timeout: unix_default_read_timeout + write_timeout: unix_default_write_timeout } c.sock = stream_socket_from_handle(c.handle)! return c @@ -447,7 +447,7 @@ fn (mut s StreamSocket) connect(socket_path string) ! { $if net_nonblocking_sockets ? { res := $if is_coroutine ? { - C.photon_connect(s.handle, voidptr(&addr), alen, unix.unix_default_read_timeout) + C.photon_connect(s.handle, voidptr(&addr), alen, unix_default_read_timeout) } $else { C.connect(s.handle, voidptr(&addr), alen) } @@ -462,7 +462,7 @@ fn (mut s StreamSocket) connect(socket_path string) ! { if ecode == int(net.error_ewouldblock) { // The socket is nonblocking and the connection cannot be completed // immediately. Wait till the socket is ready to write - write_result := s.@select(.write, unix.connect_timeout)! + write_result := s.@select(.write, connect_timeout)! err := 0 len := sizeof(err) // determine whether connect() completed successfully (SO_ERROR is zero) @@ -484,7 +484,7 @@ fn (mut s StreamSocket) connect(socket_path string) ! { return } $else { x := $if is_coroutine ? 
{ - C.photon_connect(s.handle, voidptr(&addr), alen, unix.unix_default_read_timeout) + C.photon_connect(s.handle, voidptr(&addr), alen, unix_default_read_timeout) } $else { C.connect(s.handle, voidptr(&addr), alen) } diff --git a/vlib/net/urllib/urllib.v b/vlib/net/urllib/urllib.v index 4fad41208fb7a5..22ec412c8c8137 100644 --- a/vlib/net/urllib/urllib.v +++ b/vlib/net/urllib/urllib.v @@ -165,7 +165,7 @@ fn unescape(s_ string, mode EncodingMode) !string { if s.len > 3 { s = s[..3] } - return error(error_msg(urllib.err_msg_escape, s)) + return error(error_msg(err_msg_escape, s)) } // Per https://tools.ietf.org/html/rfc3986#page-21 // in the host component %-encoding can only be used @@ -175,7 +175,7 @@ fn unescape(s_ string, mode EncodingMode) !string { // in IPv6 scoped-address literals. Yay. if i + 3 >= s.len && mode == .encode_host && unhex(s[i + 1]) < 8 && s[i..i + 3] != '%25' { - return error(error_msg(urllib.err_msg_escape, s[i..i + 3])) + return error(error_msg(err_msg_escape, s[i..i + 3])) } if mode == .encode_zone { // RFC 6874 says basically 'anything goes' for zone identifiers @@ -190,7 +190,7 @@ fn unescape(s_ string, mode EncodingMode) !string { } v := ((unhex(s[i + 1]) << u8(4)) | unhex(s[i + 2])) if s[i..i + 3] != '%25' && v != ` ` && should_escape(v, .encode_host) { - error(error_msg(urllib.err_msg_escape, s[i..i + 3])) + error(error_msg(err_msg_escape, s[i..i + 3])) } } i += 3 @@ -436,12 +436,11 @@ fn split(s string, sep u8, cutc bool) (string, string) { pub fn parse(rawurl string) !URL { // Cut off #frag u, frag := split(rawurl, `#`, true) - mut url := parse_url(u, false) or { return error(error_msg(urllib.err_msg_parse, u)) } + mut url := parse_url(u, false) or { return error(error_msg(err_msg_parse, u)) } if frag == '' { return url } - f := unescape(frag, .encode_fragment) or { return error(error_msg(urllib.err_msg_parse, - u)) } + f := unescape(frag, .encode_fragment) or { return error(error_msg(err_msg_parse, u)) } url.fragment = f return url } 
diff --git a/vlib/net/util.v b/vlib/net/util.v index 47d6c12b2738cb..75276a3d4550ed 100644 --- a/vlib/net/util.v +++ b/vlib/net/util.v @@ -5,7 +5,7 @@ const socket_max_port = u16(0xFFFF) // validate_port checks whether a port is valid // and returns the port or an error pub fn validate_port(port int) !u16 { - if port <= net.socket_max_port { + if port <= socket_max_port { return u16(port) } else { return err_port_out_of_range diff --git a/vlib/net/websocket/message.v b/vlib/net/websocket/message.v index 31a3cdfdb27075..d230342073a317 100644 --- a/vlib/net/websocket/message.v +++ b/vlib/net/websocket/message.v @@ -222,7 +222,7 @@ pub fn (mut ws Client) parse_frame_header() !Frame { buffer[bytes_read] = rbuff[0] bytes_read++ // parses the first two header bytes to get basic frame information - if bytes_read == websocket.header_len_offset { + if bytes_read == header_len_offset { frame.fin = (buffer[0] & 0x80) == 0x80 frame.rsv1 = (buffer[0] & 0x40) == 0x40 frame.rsv2 = (buffer[0] & 0x20) == 0x20 @@ -233,11 +233,11 @@ pub fn (mut ws Client) parse_frame_header() !Frame { // if the frame has a mask, set the byte position where the mask ends if frame.has_mask { mask_end_byte = if frame.payload_len < 126 { - websocket.header_len_offset + 4 + header_len_offset + 4 } else if frame.payload_len == 126 { - websocket.header_len_offset + 6 + header_len_offset + 6 } else if frame.payload_len == 127 { - websocket.header_len_offset + 12 + header_len_offset + 12 } else { 0 } // impossible @@ -248,7 +248,7 @@ pub fn (mut ws Client) parse_frame_header() !Frame { break } } - if frame.payload_len == 126 && bytes_read == websocket.extended_payload16_end_byte { + if frame.payload_len == 126 && bytes_read == extended_payload16_end_byte { frame.header_len += 2 frame.payload_len = 0 frame.payload_len |= int(u32(buffer[2]) << 8) @@ -258,7 +258,7 @@ pub fn (mut ws Client) parse_frame_header() !Frame { break } } - if frame.payload_len == 127 && bytes_read == websocket.extended_payload64_end_byte 
{ + if frame.payload_len == 127 && bytes_read == extended_payload64_end_byte { frame.header_len += 8 // these shift operators needs 64 bit on clang with -prod flag mut payload_len := u64(0) diff --git a/vlib/net/websocket/websocket_client.v b/vlib/net/websocket/websocket_client.v index b6f739ea5151e6..9d8687d2467639 100644 --- a/vlib/net/websocket/websocket_client.v +++ b/vlib/net/websocket/websocket_client.v @@ -368,7 +368,7 @@ fn (mut ws Client) send_control_frame(code OPCode, frame_typ string, payload []u header_len := if ws.is_server { 2 } else { 6 } frame_len := header_len + payload.len mut control_frame := []u8{len: frame_len} - mut masking_key := if !ws.is_server { create_masking_key() } else { websocket.empty_bytearr } + mut masking_key := if !ws.is_server { create_masking_key() } else { empty_bytearr } defer { unsafe { control_frame.free() diff --git a/vlib/orm/orm.v b/vlib/orm/orm.v index 0722186e89b75d..b5a4d1f50c1fb1 100644 --- a/vlib/orm/orm.v +++ b/vlib/orm/orm.v @@ -593,10 +593,10 @@ fn sql_field_type(field TableField) int { for attr in field.attrs { if attr.kind == .plain && attr.name == 'sql' && attr.arg != '' { if attr.arg.to_lower() == 'serial' { - typ = orm.serial + typ = serial break } - typ = orm.type_idx[attr.arg] + typ = type_idx[attr.arg] break } } @@ -622,7 +622,7 @@ fn bool_to_primitive(b bool) Primitive { } fn option_bool_to_primitive(b ?bool) Primitive { - return if b_ := b { Primitive(b_) } else { orm.null_primitive } + return if b_ := b { Primitive(b_) } else { null_primitive } } fn f32_to_primitive(b f32) Primitive { @@ -630,7 +630,7 @@ fn f32_to_primitive(b f32) Primitive { } fn option_f32_to_primitive(b ?f32) Primitive { - return if b_ := b { Primitive(b_) } else { orm.null_primitive } + return if b_ := b { Primitive(b_) } else { null_primitive } } fn f64_to_primitive(b f64) Primitive { @@ -638,7 +638,7 @@ fn f64_to_primitive(b f64) Primitive { } fn option_f64_to_primitive(b ?f64) Primitive { - return if b_ := b { Primitive(b_) } 
else { orm.null_primitive } + return if b_ := b { Primitive(b_) } else { null_primitive } } fn i8_to_primitive(b i8) Primitive { @@ -646,7 +646,7 @@ fn i8_to_primitive(b i8) Primitive { } fn option_i8_to_primitive(b ?i8) Primitive { - return if b_ := b { Primitive(b_) } else { orm.null_primitive } + return if b_ := b { Primitive(b_) } else { null_primitive } } fn i16_to_primitive(b i16) Primitive { @@ -654,7 +654,7 @@ fn i16_to_primitive(b i16) Primitive { } fn option_i16_to_primitive(b ?i16) Primitive { - return if b_ := b { Primitive(b_) } else { orm.null_primitive } + return if b_ := b { Primitive(b_) } else { null_primitive } } fn int_to_primitive(b int) Primitive { @@ -662,7 +662,7 @@ fn int_to_primitive(b int) Primitive { } fn option_int_to_primitive(b ?int) Primitive { - return if b_ := b { Primitive(b_) } else { orm.null_primitive } + return if b_ := b { Primitive(b_) } else { null_primitive } } // int_literal_to_primitive handles int literal value @@ -671,7 +671,7 @@ fn int_literal_to_primitive(b int) Primitive { } fn option_int_literal_to_primitive(b ?int) Primitive { - return if b_ := b { Primitive(b_) } else { orm.null_primitive } + return if b_ := b { Primitive(b_) } else { null_primitive } } // float_literal_to_primitive handles float literal value @@ -680,7 +680,7 @@ fn float_literal_to_primitive(b f64) Primitive { } fn option_float_literal_to_primitive(b ?f64) Primitive { - return if b_ := b { Primitive(b_) } else { orm.null_primitive } + return if b_ := b { Primitive(b_) } else { null_primitive } } fn i64_to_primitive(b i64) Primitive { @@ -688,7 +688,7 @@ fn i64_to_primitive(b i64) Primitive { } fn option_i64_to_primitive(b ?i64) Primitive { - return if b_ := b { Primitive(b_) } else { orm.null_primitive } + return if b_ := b { Primitive(b_) } else { null_primitive } } fn u8_to_primitive(b u8) Primitive { @@ -696,7 +696,7 @@ fn u8_to_primitive(b u8) Primitive { } fn option_u8_to_primitive(b ?u8) Primitive { - return if b_ := b { Primitive(b_) } 
else { orm.null_primitive } + return if b_ := b { Primitive(b_) } else { null_primitive } } fn u16_to_primitive(b u16) Primitive { @@ -704,7 +704,7 @@ fn u16_to_primitive(b u16) Primitive { } fn option_u16_to_primitive(b ?u16) Primitive { - return if b_ := b { Primitive(b_) } else { orm.null_primitive } + return if b_ := b { Primitive(b_) } else { null_primitive } } fn u32_to_primitive(b u32) Primitive { @@ -712,7 +712,7 @@ fn u32_to_primitive(b u32) Primitive { } fn option_u32_to_primitive(b ?u32) Primitive { - return if b_ := b { Primitive(b_) } else { orm.null_primitive } + return if b_ := b { Primitive(b_) } else { null_primitive } } fn u64_to_primitive(b u64) Primitive { @@ -720,7 +720,7 @@ fn u64_to_primitive(b u64) Primitive { } fn option_u64_to_primitive(b ?u64) Primitive { - return if b_ := b { Primitive(b_) } else { orm.null_primitive } + return if b_ := b { Primitive(b_) } else { null_primitive } } fn string_to_primitive(b string) Primitive { @@ -728,7 +728,7 @@ fn string_to_primitive(b string) Primitive { } fn option_string_to_primitive(b ?string) Primitive { - return if b_ := b { Primitive(b_) } else { orm.null_primitive } + return if b_ := b { Primitive(b_) } else { null_primitive } } fn time_to_primitive(b time.Time) Primitive { @@ -736,7 +736,7 @@ fn time_to_primitive(b time.Time) Primitive { } fn option_time_to_primitive(b ?time.Time) Primitive { - return if b_ := b { Primitive(b_) } else { orm.null_primitive } + return if b_ := b { Primitive(b_) } else { null_primitive } } fn infix_to_primitive(b InfixType) Primitive { diff --git a/vlib/os/filepath.v b/vlib/os/filepath.v index e32872bbeca2cc..0fafc2f031e826 100644 --- a/vlib/os/filepath.v +++ b/vlib/os/filepath.v @@ -24,7 +24,7 @@ pub fn is_abs_path(path string) bool { $if windows { return is_unc_path(path) || is_drive_rooted(path) || is_normal_path(path) } - return path[0] == os.fslash + return path[0] == fslash } // abs_path joins the current working directory @@ -36,7 +36,7 @@ pub fn 
abs_path(path string) string { return wd } npath := norm_path(path) - if npath == os.dot_str { + if npath == dot_str { return wd } if !is_abs_path(npath) { @@ -58,22 +58,22 @@ pub fn abs_path(path string) string { @[direct_array_access] pub fn norm_path(path string) string { if path == '' { - return os.dot_str + return dot_str } rooted := is_abs_path(path) // get the volume name from the path // if the current operating system is Windows volume_len := win_volume_len(path) mut volume := path[..volume_len] - if volume_len != 0 && volume.contains(os.fslash_str) { - volume = volume.replace(os.fslash_str, path_separator) + if volume_len != 0 && volume.contains(fslash_str) { + volume = volume.replace(fslash_str, path_separator) } cpath := clean_path(path[volume_len..]) if cpath == '' && volume_len == 0 { - return os.dot_str + return dot_str } spath := cpath.split(path_separator) - if os.dot_dot !in spath { + if dot_dot !in spath { return if volume_len != 0 { volume + cpath } else { cpath } } // resolve backlinks (..) @@ -86,10 +86,10 @@ pub fn norm_path(path string) string { mut backlink_count := 0 for i := spath_len - 1; i >= 0; i-- { part := spath[i] - if part == os.empty_str { + if part == empty_str { continue } - if part == os.dot_dot { + if part == dot_dot { backlink_count++ continue } @@ -103,7 +103,7 @@ pub fn norm_path(path string) string { // is not possible and the given path is not rooted if backlink_count != 0 && !rooted { for i in 0 .. 
backlink_count { - sb.write_string(os.dot_dot) + sb.write_string(dot_dot) if new_path.len == 0 && i == backlink_count - 1 { break } @@ -117,7 +117,7 @@ pub fn norm_path(path string) string { return volume } if !rooted { - return os.dot_str + return dot_str } return path_separator } @@ -181,7 +181,7 @@ pub fn existing_path(path string) !string { // - the last path separator fn clean_path(path string) string { if path == '' { - return os.empty_str + return empty_str } mut sb := strings.new_builder(path.len) mut sc := textscanner.new(path) @@ -203,8 +203,8 @@ fn clean_path(path string) string { } // turn forward slash into a back slash on a Windows system $if windows { - if curr == os.fslash { - sb.write_u8(os.bslash) + if curr == fslash { + sb.write_u8(bslash) continue } } @@ -271,9 +271,9 @@ fn win_volume_len(path string) int { fn is_slash(b u8) bool { $if windows { - return b == os.bslash || b == os.fslash + return b == bslash || b == fslash } - return b == os.fslash + return b == fslash } fn is_unc_path(path string) bool { @@ -307,7 +307,7 @@ fn is_normal_path(path string) bool { // a reference to a current directory (.). // NOTE: a negative integer means that no byte is present fn is_curr_dir_ref(byte_one int, byte_two int, byte_three int) bool { - if u8(byte_two) != os.dot { + if u8(byte_two) != dot { return false } return (byte_one < 0 || is_slash(u8(byte_one))) && (byte_three < 0 || is_slash(u8(byte_three))) diff --git a/vlib/os/notify/backend_darwin.c.v b/vlib/os/notify/backend_darwin.c.v index 5bef7f0dbbbf35..f4e68775c38bf4 100644 --- a/vlib/os/notify/backend_darwin.c.v +++ b/vlib/os/notify/backend_darwin.c.v @@ -109,8 +109,8 @@ fn (mut kn KqueueNotifier) modify(fd int, events FdEventType, conf ...FdConfigFl // remove removes a file descriptor from the watch list fn (mut kn KqueueNotifier) remove(fd int) ! 
{ - filter := notify.kqueue_read | notify.kqueue_write | notify.kqueue_exception - flags := notify.kqueue_delete + filter := kqueue_read | kqueue_write | kqueue_exception + flags := kqueue_delete kn.ctl(fd, filter, flags)! } @@ -160,20 +160,20 @@ fn (mut kn KqueueNotifier) close() ! { fn event_mask_to_flag(filter i16, flags u16) FdEventType { mut res := unsafe { FdEventType(0) } - if filter & notify.kqueue_read != 0 { + if filter & kqueue_read != 0 { res.set(.read) } - if filter & notify.kqueue_write != 0 { + if filter & kqueue_write != 0 { res.set(.write) } - if filter & notify.kqueue_exception != 0 { + if filter & kqueue_exception != 0 { res.set(.exception) } - if flags & notify.kqueue_eof != 0 { + if flags & kqueue_eof != 0 { res.set(.hangup) } - if flags & notify.kqueue_error != 0 { + if flags & kqueue_error != 0 { res.set(.error) } @@ -185,13 +185,13 @@ fn event_mask_to_flag(filter i16, flags u16) FdEventType { fn filter_to_mask(events FdEventType) i16 { mut mask := i16(0) if events.has(.read) { - mask |= notify.kqueue_read + mask |= kqueue_read } if events.has(.write) { - mask |= notify.kqueue_write + mask |= kqueue_write } if events.has(.exception) { - mask |= notify.kqueue_exception + mask |= kqueue_exception } if events.has(.peer_hangup) { panic("Kqueue does not support 'peer_hangup' event type.") @@ -208,13 +208,13 @@ fn filter_to_mask(events FdEventType) i16 { // flags_to_mask is a helper function that converts FdConfigFlags // to a bitmask used by the C functions fn flags_to_mask(confs ...FdConfigFlags) u16 { - mut mask := notify.kqueue_add | notify.kqueue_enable + mut mask := kqueue_add | kqueue_enable for conf in confs { if conf.has(.edge_trigger) { - mask |= notify.kqueue_edge_trigger + mask |= kqueue_edge_trigger } if conf.has(.one_shot) { - mask |= notify.kqueue_oneshot + mask |= kqueue_oneshot } if conf.has(.wake_up) { panic("Kqueue does not support 'wake_up' flag.") diff --git a/vlib/os/notify/backend_linux.c.v b/vlib/os/notify/backend_linux.c.v 
index d3d257a0dc832e..1a35cc3e0fb9c3 100644 --- a/vlib/os/notify/backend_linux.c.v +++ b/vlib/os/notify/backend_linux.c.v @@ -140,22 +140,22 @@ fn (mut en EpollNotifier) close() ! { fn event_mask_to_flag(mask u32) FdEventType { mut flags := unsafe { FdEventType(0) } - if mask & notify.epoll_read != 0 { + if mask & epoll_read != 0 { flags.set(.read) } - if mask & notify.epoll_write != 0 { + if mask & epoll_write != 0 { flags.set(.write) } - if mask & notify.epoll_peer_hangup != 0 { + if mask & epoll_peer_hangup != 0 { flags.set(.peer_hangup) } - if mask & notify.epoll_exception != 0 { + if mask & epoll_exception != 0 { flags.set(.exception) } - if mask & notify.epoll_error != 0 { + if mask & epoll_error != 0 { flags.set(.error) } - if mask & notify.epoll_hangup != 0 { + if mask & epoll_hangup != 0 { flags.set(.hangup) } @@ -167,35 +167,35 @@ fn event_mask_to_flag(mask u32) FdEventType { fn flags_to_mask(events FdEventType, confs ...FdConfigFlags) u32 { mut mask := u32(0) if events.has(.read) { - mask |= notify.epoll_read + mask |= epoll_read } if events.has(.write) { - mask |= notify.epoll_write + mask |= epoll_write } if events.has(.peer_hangup) { - mask |= notify.epoll_peer_hangup + mask |= epoll_peer_hangup } if events.has(.exception) { - mask |= notify.epoll_exception + mask |= epoll_exception } if events.has(.error) { - mask |= notify.epoll_error + mask |= epoll_error } if events.has(.hangup) { - mask |= notify.epoll_hangup + mask |= epoll_hangup } for conf in confs { if conf.has(.edge_trigger) { - mask |= notify.epoll_edge_trigger + mask |= epoll_edge_trigger } if conf.has(.one_shot) { - mask |= notify.epoll_one_shot + mask |= epoll_one_shot } if conf.has(.wake_up) { - mask |= notify.epoll_wake_up + mask |= epoll_wake_up } if conf.has(.exclusive) { - mask |= notify.epoll_exclusive + mask |= epoll_exclusive } } return mask diff --git a/vlib/os/os.c.v b/vlib/os/os.c.v index 7f89b151776bc5..1b973a1585d95a 100644 --- a/vlib/os/os.c.v +++ b/vlib/os/os.c.v @@ -89,10 
+89,10 @@ const buf_size = 4096 // but to read the file in `buf_size` chunks. @[manualfree] fn slurp_file_in_builder(fp &C.FILE) !strings.Builder { - buf := [os.buf_size]u8{} - mut sb := strings.new_builder(os.buf_size) + buf := [buf_size]u8{} + mut sb := strings.new_builder(buf_size) for { - mut read_bytes := fread(&buf[0], 1, os.buf_size, fp) or { + mut read_bytes := fread(&buf[0], 1, buf_size, fp) or { if err is Eof { break } @@ -1044,7 +1044,7 @@ pub const error_code_not_set = int(0x7EFEFEFE) pub struct SystemError { pub: msg string - code int = os.error_code_not_set + code int = error_code_not_set } // Return a POSIX error: @@ -1052,7 +1052,7 @@ pub: // Message defaults to POSIX error message for the error code @[inline] pub fn error_posix(e SystemError) IError { - code := if e.code == os.error_code_not_set { C.errno } else { e.code } + code := if e.code == error_code_not_set { C.errno } else { e.code } message := if e.msg == '' { posix_get_error_msg(code) } else { e.msg } return error_with_code(message, code) } @@ -1063,7 +1063,7 @@ pub fn error_posix(e SystemError) IError { @[inline] pub fn error_win32(e SystemError) IError { $if windows { - code := if e.code == os.error_code_not_set { int(C.GetLastError()) } else { e.code } + code := if e.code == error_code_not_set { int(C.GetLastError()) } else { e.code } message := if e.msg == '' { get_error_msg(code) } else { e.msg } return error_with_code(message, code) } $else { diff --git a/vlib/os/os.v b/vlib/os/os.v index 183500bb321449..6f89bfa34aad36 100644 --- a/vlib/os/os.v +++ b/vlib/os/os.v @@ -58,7 +58,7 @@ fn executable_fallback() string { other_separator := if path_separator == '/' { '\\' } else { '/' } rexepath := exepath.replace(other_separator, path_separator) if rexepath.contains(path_separator) { - exepath = join_path_single(os.wd_at_startup, exepath) + exepath = join_path_single(wd_at_startup, exepath) } else { // no choice but to try to walk the PATH folders :-| ... 
foundpath := find_abs_path_of_executable(exepath) or { '' } diff --git a/vlib/os/os_nix.c.v b/vlib/os/os_nix.c.v index 0fabdb860905a8..95429506fbe104 100644 --- a/vlib/os/os_nix.c.v +++ b/vlib/os/os_nix.c.v @@ -125,8 +125,8 @@ fn glob_match(dir string, pattern string, next_pattern string, mut matches []str } for file in files { mut fpath := file - f := if file.contains(os.path_separator) { - pathwalk := file.split(os.path_separator) + f := if file.contains(path_separator) { + pathwalk := file.split(path_separator) pathwalk[pathwalk.len - 1] } else { fpath = if dir == '.' { file } else { '${dir}/${file}' } @@ -160,7 +160,7 @@ fn glob_match(dir string, pattern string, next_pattern string, mut matches []str if is_dir(fpath) { subdirs << fpath if next_pattern == pattern && next_pattern != '' { - matches << '${fpath}${os.path_separator}' + matches << '${fpath}${path_separator}' } } else { matches << fpath @@ -171,8 +171,8 @@ fn glob_match(dir string, pattern string, next_pattern string, mut matches []str } fn native_glob_pattern(pattern string, mut matches []string) ! { - steps := pattern.split(os.path_separator) - cwd := if pattern.starts_with(os.path_separator) { os.path_separator } else { '.' } + steps := pattern.split(path_separator) + cwd := if pattern.starts_with(path_separator) { path_separator } else { '.' } mut subdirs := [cwd] for i := 0; i < steps.len; i++ { step := steps[i] @@ -180,7 +180,7 @@ fn native_glob_pattern(pattern string, mut matches []string) ! 
{ if step == '' { continue } - if is_dir('${cwd}${os.path_separator}${step}') { + if is_dir('${cwd}${path_separator}${step}') { dd := if cwd == '/' { step } else { diff --git a/vlib/os/os_windows.c.v b/vlib/os/os_windows.c.v index ad6280949b4c5e..69498c5b6ffcc1 100644 --- a/vlib/os/os_windows.c.v +++ b/vlib/os/os_windows.c.v @@ -265,10 +265,10 @@ const max_error_code = 15841 fn ptr_win_get_error_msg(code u32) voidptr { mut buf := unsafe { nil } // Check for code overflow - if code > u32(os.max_error_code) { + if code > u32(max_error_code) { return buf } - C.FormatMessageW(os.format_message_allocate_buffer | os.format_message_from_system | os.format_message_ignore_inserts, + C.FormatMessageW(format_message_allocate_buffer | format_message_from_system | format_message_ignore_inserts, 0, code, 0, voidptr(&buf), 0, 0) return buf } diff --git a/vlib/picoev/picoev.v b/vlib/picoev/picoev.v index 1715241f56d2d0..df0c9083bcfbd3 100644 --- a/vlib/picoev/picoev.v +++ b/vlib/picoev/picoev.v @@ -88,11 +88,11 @@ pub: // init fills the `file_descriptors` array pub fn (mut pv Picoev) init() { - assert picoev.max_fds > 0 + assert max_fds > 0 pv.num_loops = 0 - for i in 0 .. picoev.max_fds { + for i in 0 .. 
max_fds { pv.file_descriptors[i] = &Target{} } } @@ -100,7 +100,7 @@ pub fn (mut pv Picoev) init() { // add a file descriptor to the event loop @[direct_array_access] pub fn (mut pv Picoev) add(fd int, events int, timeout int, callback voidptr) int { - if pv == unsafe { nil } || fd < 0 || fd >= picoev.max_fds { + if pv == unsafe { nil } || fd < 0 || fd >= max_fds { return -1 // Invalid arguments } @@ -110,7 +110,7 @@ pub fn (mut pv Picoev) add(fd int, events int, timeout int, callback voidptr) in target.loop_id = pv.loop.id target.events = 0 - if pv.update_events(fd, events | picoev.picoev_add) != 0 { + if pv.update_events(fd, events | picoev_add) != 0 { if pv.delete(fd) != 0 { eprintln('Error during del') } @@ -132,7 +132,7 @@ pub fn (mut pv Picoev) del(fd int) int { // remove a file descriptor from the event loop @[direct_array_access] pub fn (mut pv Picoev) delete(fd int) int { - if fd < 0 || fd >= picoev.max_fds { + if fd < 0 || fd >= max_fds { return -1 // Invalid fd } @@ -142,7 +142,7 @@ pub fn (mut pv Picoev) delete(fd int) int { eprintln('remove ${fd}') } - if pv.update_events(fd, picoev.picoev_del) != 0 { + if pv.update_events(fd, picoev_del) != 0 { eprintln('Error during update_events. 
event: `picoev.picoev_del`') return -1 } @@ -177,7 +177,7 @@ fn (mut pv Picoev) loop_once(max_wait_in_sec int) int { // the file descriptors target callback is called with a timeout event @[direct_array_access; inline] fn (mut pv Picoev) set_timeout(fd int, secs int) { - assert fd < picoev.max_fds + assert fd < max_fds if secs != 0 { pv.timeouts[fd] = pv.loop.now + secs } else { @@ -202,7 +202,7 @@ fn (mut pv Picoev) handle_timeout() { target := pv.file_descriptors[fd] assert target.loop_id == pv.loop.id pv.timeouts.delete(fd) - unsafe { target.cb(fd, picoev.picoev_timeout, &pv) } + unsafe { target.cb(fd, picoev_timeout, &pv) } } } @@ -220,7 +220,7 @@ fn accept_callback(listen_fd int, events int, cb_arg voidptr) { return } - if accepted_fd >= picoev.max_fds { + if accepted_fd >= max_fds { // should never happen close_socket(accepted_fd) return @@ -237,7 +237,7 @@ fn accept_callback(listen_fd int, events int, cb_arg voidptr) { close_socket(accepted_fd) // Close fd on failure return } - pv.add(accepted_fd, picoev.picoev_read, pv.timeout_secs, raw_callback) + pv.add(accepted_fd, picoev_read, pv.timeout_secs, raw_callback) } // close_conn closes the socket `fd` and removes it from the loop @@ -257,7 +257,7 @@ fn raw_callback(fd int, events int, context voidptr) { pv.idx[fd] = 0 } - if events & picoev.picoev_timeout != 0 { + if events & picoev_timeout != 0 { $if trace_fd ? 
{ eprintln('timeout ${fd}') } @@ -269,7 +269,7 @@ fn raw_callback(fd int, events int, context voidptr) { pv.close_conn(fd) return - } else if events & picoev.picoev_read != 0 { + } else if events & picoev_read != 0 { pv.set_timeout(fd, pv.timeout_secs) if !isnil(pv.raw_callback) { pv.raw_callback(mut pv, fd, events) @@ -334,7 +334,7 @@ fn raw_callback(fd int, events int, context voidptr) { // Callback (should call .end() itself) pv.cb(pv.user_data, req, mut &res) - } else if events & picoev.picoev_write != 0 { + } else if events & picoev_write != 0 { pv.set_timeout(fd, pv.timeout_secs) if !isnil(pv.raw_callback) { pv.raw_callback(mut pv, fd, events) @@ -368,8 +368,8 @@ pub fn new(config Config) !&Picoev { } if isnil(pv.raw_callback) { - pv.buf = unsafe { malloc_noscan(picoev.max_fds * config.max_read + 1) } - pv.out = unsafe { malloc_noscan(picoev.max_fds * config.max_write + 1) } + pv.buf = unsafe { malloc_noscan(max_fds * config.max_read + 1) } + pv.out = unsafe { malloc_noscan(max_fds * config.max_write + 1) } } // epoll on linux @@ -391,7 +391,7 @@ pub fn new(config Config) !&Picoev { pv.init() - pv.add(listening_socket_fd, picoev.picoev_read, 0, accept_callback) + pv.add(listening_socket_fd, picoev_read, 0, accept_callback) return pv } diff --git a/vlib/picohttpparser/misc.v b/vlib/picohttpparser/misc.v index fe67a619868510..2d1cd72ae724be 100644 --- a/vlib/picohttpparser/misc.v +++ b/vlib/picohttpparser/misc.v @@ -32,15 +32,15 @@ pub fn u64toa(buf_start &u8, value u64) !int { d2 := u32((v % 100) << 1) unsafe { if v >= 1000 { - *buf++ = picohttpparser.g_digits_lut[d1] + *buf++ = g_digits_lut[d1] } if v >= 100 { - *buf++ = picohttpparser.g_digits_lut[d1 + 1] + *buf++ = g_digits_lut[d1 + 1] } if v >= 10 { - *buf++ = picohttpparser.g_digits_lut[d2] + *buf++ = g_digits_lut[d2] } - *buf++ = picohttpparser.g_digits_lut[d2 + 1] + *buf++ = g_digits_lut[d2 + 1] } } else { b := v / 10_000 @@ -54,20 +54,20 @@ pub fn u64toa(buf_start &u8, value u64) !int { unsafe { if 
value >= 10_000_000 { - *buf++ = picohttpparser.g_digits_lut[d1] + *buf++ = g_digits_lut[d1] } if value >= 1_000_000 { - *buf++ = picohttpparser.g_digits_lut[d1 + 1] + *buf++ = g_digits_lut[d1 + 1] } if value >= 100_000 { - *buf++ = picohttpparser.g_digits_lut[d2] + *buf++ = g_digits_lut[d2] } - *buf++ = picohttpparser.g_digits_lut[d2 + 1] + *buf++ = g_digits_lut[d2 + 1] - *buf++ = picohttpparser.g_digits_lut[d3] - *buf++ = picohttpparser.g_digits_lut[d3 + 1] - *buf++ = picohttpparser.g_digits_lut[d4] - *buf++ = picohttpparser.g_digits_lut[d4 + 1] + *buf++ = g_digits_lut[d3] + *buf++ = g_digits_lut[d3 + 1] + *buf++ = g_digits_lut[d4] + *buf++ = g_digits_lut[d4 + 1] } } diff --git a/vlib/picohttpparser/picohttpparser.v b/vlib/picohttpparser/picohttpparser.v index 46e7749f79e759..40e58287ab1417 100644 --- a/vlib/picohttpparser/picohttpparser.v +++ b/vlib/picohttpparser/picohttpparser.v @@ -214,7 +214,7 @@ fn (mut r Request) parse_headers(buf_start &u8, buf_end &u8, mut pret Pret) &u8 // http://www.mozilla.org/security/announce/2006/mfsa2006-33.html for *buf != `:` { // check if the current character is allowed in an HTTP header - if picohttpparser.token_char_map[*buf] == 0 { + if token_char_map[*buf] == 0 { $if trace_parse ? { eprintln('invalid character! 
${*buf}') } diff --git a/vlib/rand/mini_math.v b/vlib/rand/mini_math.v index bd4dccc2b30367..eb11823ad2739d 100644 --- a/vlib/rand/mini_math.v +++ b/vlib/rand/mini_math.v @@ -26,7 +26,7 @@ fn msqrt(a f64) f64 { // relative error of approximation = 7.47e-3 x = 4.173075996388649989089e-1 + 5.9016206709064458299663e-1 * z // adjust for odd powers of 2 if (ex & 1) != 0 { - x *= rand.sqrt2 + x *= sqrt2 } x = scalbn(x, ex >> 1) // newton iterations @@ -49,7 +49,7 @@ fn mlog(a f64) f64 { l7 := 0.1479819860511658591 x := a mut f1, mut ki := frexp(x) - if f1 < rand.sqrt2 / 2 { + if f1 < sqrt2 / 2 { f1 *= 2 ki-- } diff --git a/vlib/rand/mt19937/mt19937.v b/vlib/rand/mt19937/mt19937.v index 55c302f42bdb44..622ed04bc93f42 100644 --- a/vlib/rand/mt19937/mt19937.v +++ b/vlib/rand/mt19937/mt19937.v @@ -62,11 +62,11 @@ pub struct MT19937RNG { buffer.PRNGBuffer mut: state []u64 = get_first_state(seed.time_seed_array(2)) - mti int = mt19937.nn + mti int = nn } fn get_first_state(seed_data []u32) []u64 { - mut state := []u64{len: mt19937.nn} + mut state := []u64{len: nn} calculate_state(seed_data, mut state) return state } @@ -76,7 +76,7 @@ fn calculate_state(seed_data []u32, mut state []u64) []u64 { lo := u64(seed_data[0]) hi := u64(seed_data[1]) state[0] = u64((hi << 32) | lo) - for j := 1; j < mt19937.nn; j++ { + for j := 1; j < nn; j++ { state[j] = u64(6364136223846793005) * (state[j - 1] ^ (state[j - 1] >> 62)) + u64(j) } return *state @@ -90,7 +90,7 @@ pub fn (mut rng MT19937RNG) seed(seed_data []u32) { exit(1) } rng.state = calculate_state(seed_data, mut rng.state) - rng.mti = mt19937.nn + rng.mti = nn rng.bytes_left = 0 rng.buffer = 0 } @@ -149,18 +149,18 @@ const mag01 = [u64(0), u64(matrix_a)] pub fn (mut rng MT19937RNG) u64() u64 { mut x := u64(0) mut i := int(0) - if rng.mti >= mt19937.nn { - for i = 0; i < mt19937.nn - mt19937.mm; i++ { - x = (rng.state[i] & mt19937.um) | (rng.state[i + 1] & mt19937.lm) - rng.state[i] = rng.state[i + mt19937.mm] ^ (x >> 1) ^ 
mt19937.mag01[int(x & 1)] + if rng.mti >= nn { + for i = 0; i < nn - mm; i++ { + x = (rng.state[i] & um) | (rng.state[i + 1] & lm) + rng.state[i] = rng.state[i + mm] ^ (x >> 1) ^ mag01[int(x & 1)] } - for i < mt19937.nn - 1 { - x = (rng.state[i] & mt19937.um) | (rng.state[i + 1] & mt19937.lm) - rng.state[i] = rng.state[i + (mt19937.mm - mt19937.nn)] ^ (x >> 1) ^ mt19937.mag01[int(x & 1)] + for i < nn - 1 { + x = (rng.state[i] & um) | (rng.state[i + 1] & lm) + rng.state[i] = rng.state[i + (mm - nn)] ^ (x >> 1) ^ mag01[int(x & 1)] i++ } - x = (rng.state[mt19937.nn - 1] & mt19937.um) | (rng.state[0] & mt19937.lm) - rng.state[mt19937.nn - 1] = rng.state[mt19937.mm - 1] ^ (x >> 1) ^ mt19937.mag01[int(x & 1)] + x = (rng.state[nn - 1] & um) | (rng.state[0] & lm) + rng.state[nn - 1] = rng.state[mm - 1] ^ (x >> 1) ^ mag01[int(x & 1)] rng.mti = 0 } x = rng.state[rng.mti] diff --git a/vlib/rand/rand.c.v b/vlib/rand/rand.c.v index 58a74397e82ae0..99cc24dd5751c1 100644 --- a/vlib/rand/rand.c.v +++ b/vlib/rand/rand.c.v @@ -40,7 +40,7 @@ fn internal_uuid_v4(mut rng PRNG) string { // >> to zero and one, respectively. // all nibbles starting with 10 are: 1000, 1001, 1010, 1011 -> hex digits `8`, `9`, `a`, `b` // these are stored in clock_seq_hi_and_reserved_valid_values, choose one of them at random: - buf[19] = rand.clock_seq_hi_and_reserved_valid_values[d & 0x03] + buf[19] = clock_seq_hi_and_reserved_valid_values[d & 0x03] // >> Set the four most significant bits (bits 12 through 15) of the // >> time_hi_and_version field to the 4-bit version number from Section 4.1.3. 
buf[14] = `4` @@ -63,7 +63,7 @@ fn internal_ulid_at_millisecond(mut rng PRNG, unix_time_milli u64) string { mut i := 9 for i >= 0 { unsafe { - buf[i] = rand.ulid_encoding[t & 0x1F] + buf[i] = ulid_encoding[t & 0x1F] } t = t >> 5 i-- @@ -73,7 +73,7 @@ fn internal_ulid_at_millisecond(mut rng PRNG, unix_time_milli u64) string { i = 10 for i < 19 { unsafe { - buf[i] = rand.ulid_encoding[x & 0x1F] + buf[i] = ulid_encoding[x & 0x1F] } x = x >> 5 i++ @@ -82,7 +82,7 @@ fn internal_ulid_at_millisecond(mut rng PRNG, unix_time_milli u64) string { x = rng.u64() for i < 26 { unsafe { - buf[i] = rand.ulid_encoding[x & 0x1F] + buf[i] = ulid_encoding[x & 0x1F] } x = x >> 5 i++ diff --git a/vlib/rand/rand.js.v b/vlib/rand/rand.js.v index f06bb4c189cba6..14b0c230e1becb 100644 --- a/vlib/rand/rand.js.v +++ b/vlib/rand/rand.js.v @@ -27,7 +27,7 @@ fn internal_ulid_at_millisecond(mut rng PRNG, unix_time_milli u64) string { mut t := unix_time_milli mut i := 9 for i >= 0 { - buf[i] = rand.ulid_encoding[int(t & 0x1f)] + buf[i] = ulid_encoding[int(t & 0x1f)] t = t >> 5 i-- } @@ -35,7 +35,7 @@ fn internal_ulid_at_millisecond(mut rng PRNG, unix_time_milli u64) string { mut x := rng.u64() i = 10 for i < 19 { - buf[i] = rand.ulid_encoding[int(x & 0x1f)] + buf[i] = ulid_encoding[int(x & 0x1f)] x = x >> 5 i++ @@ -43,7 +43,7 @@ fn internal_ulid_at_millisecond(mut rng PRNG, unix_time_milli u64) string { x = rng.u64() for i < 26 { - buf[i] = rand.ulid_encoding[int(x & 0x1f)] + buf[i] = ulid_encoding[int(x & 0x1f)] x = x >> 5 i++ } diff --git a/vlib/rand/rand.v b/vlib/rand/rand.v index bff2eeef10ae5b..ae4ba23daf6481 100644 --- a/vlib/rand/rand.v +++ b/vlib/rand/rand.v @@ -338,17 +338,17 @@ pub fn (mut rng PRNG) string_from_set(charset string, len int) string { // string returns a string of length `len` containing random characters in range `[a-zA-Z]`. 
pub fn (mut rng PRNG) string(len int) string { - return internal_string_from_set(mut rng, rand.english_letters, len) + return internal_string_from_set(mut rng, english_letters, len) } // hex returns a hexadecimal number of length `len` containing random characters in range `[a-f0-9]`. pub fn (mut rng PRNG) hex(len int) string { - return internal_string_from_set(mut rng, rand.hex_chars, len) + return internal_string_from_set(mut rng, hex_chars, len) } // ascii returns a random string of the printable ASCII characters with length `len`. pub fn (mut rng PRNG) ascii(len int) string { - return internal_string_from_set(mut rng, rand.ascii_chars, len) + return internal_string_from_set(mut rng, ascii_chars, len) } // fill_buffer_from_set fills the mutable `buf` with random characters from the given `charset` @@ -689,17 +689,17 @@ pub fn fill_buffer_from_set(charset string, mut buf []u8) { // string returns a string of length `len` containing random characters in range `[a-zA-Z]`. pub fn string(len int) string { - return string_from_set(rand.english_letters, len) + return string_from_set(english_letters, len) } // hex returns a hexadecimal number of length `len` containing random characters in range `[a-f0-9]`. pub fn hex(len int) string { - return string_from_set(rand.hex_chars, len) + return string_from_set(hex_chars, len) } // ascii returns a random string of the printable ASCII characters with length `len`. pub fn ascii(len int) string { - return string_from_set(rand.ascii_chars, len) + return string_from_set(ascii_chars, len) } // shuffle randomly permutates the elements in `a`. 
The range for shuffling is diff --git a/vlib/rand/sys/system_rng.c.v b/vlib/rand/sys/system_rng.c.v index 51c4711ac899b7..62c398b74f4541 100644 --- a/vlib/rand/sys/system_rng.c.v +++ b/vlib/rand/sys/system_rng.c.v @@ -28,8 +28,8 @@ const u32_iter_count = calculate_iterations_for(32) const u64_iter_count = calculate_iterations_for(64) fn calculate_iterations_for(bits_ int) int { - base := bits_ / sys.rand_bitsize - extra := if bits_ % sys.rand_bitsize == 0 { 0 } else { 1 } + base := bits_ / rand_bitsize + extra := if bits_ % rand_bitsize == 0 { 0 } else { 1 } return base + extra } @@ -70,7 +70,7 @@ pub fn (mut r SysRNG) u8() u8 { return value } r.buffer = u64(r.default_rand()) - r.bytes_left = sys.rand_bytesize - 1 + r.bytes_left = rand_bytesize - 1 value := u8(r.buffer) r.buffer >>= 8 return value @@ -86,8 +86,8 @@ pub fn (mut r SysRNG) u16() u16 { return value } mut result := u16(C.rand()) - for i in 1 .. sys.u16_iter_count { - result = result ^ (u16(C.rand()) << (sys.rand_bitsize * i)) + for i in 1 .. u16_iter_count { + result = result ^ (u16(C.rand()) << (rand_bitsize * i)) } return result } @@ -96,8 +96,8 @@ pub fn (mut r SysRNG) u16() u16 { @[inline] pub fn (r SysRNG) u32() u32 { mut result := u32(C.rand()) - for i in 1 .. sys.u32_iter_count { - result = result ^ (u32(C.rand()) << (sys.rand_bitsize * i)) + for i in 1 .. u32_iter_count { + result = result ^ (u32(C.rand()) << (rand_bitsize * i)) } return result } @@ -106,8 +106,8 @@ pub fn (r SysRNG) u32() u32 { @[inline] pub fn (r SysRNG) u64() u64 { mut result := u64(C.rand()) - for i in 1 .. sys.u64_iter_count { - result = result ^ (u64(C.rand()) << (sys.rand_bitsize * i)) + for i in 1 .. u64_iter_count { + result = result ^ (u64(C.rand()) << (rand_bitsize * i)) } return result } @@ -115,7 +115,7 @@ pub fn (r SysRNG) u64() u64 { // block_size returns the number of bits that the RNG can produce in a single iteration. 
@[inline] pub fn (r SysRNG) block_size() int { - return sys.rand_bitsize + return rand_bitsize } // free should be called when the generator is no longer needed diff --git a/vlib/rand/wyrand/wyrand.v b/vlib/rand/wyrand/wyrand.v index a008e62fc7c692..4d9401f151d34f 100644 --- a/vlib/rand/wyrand/wyrand.v +++ b/vlib/rand/wyrand/wyrand.v @@ -86,9 +86,9 @@ pub fn (mut rng WyRandRNG) u32() u32 { pub fn (mut rng WyRandRNG) u64() u64 { unsafe { mut seed1 := rng.state - seed1 += wyrand.wyp0 + seed1 += wyp0 rng.state = seed1 - return hash.wymum(seed1 ^ wyrand.wyp1, seed1) + return hash.wymum(seed1 ^ wyp1, seed1) } return 0 } diff --git a/vlib/rand/xoroshiro128pp/xoros128pp_test.v b/vlib/rand/xoroshiro128pp/xoros128pp_test.v index e09b83ed6e0b6c..444e6c231cb1eb 100644 --- a/vlib/rand/xoroshiro128pp/xoros128pp_test.v +++ b/vlib/rand/xoroshiro128pp/xoros128pp_test.v @@ -39,11 +39,11 @@ fn test_xoroshiro128pp_variability() { // If this test fails and if it is certainly not the implementation // at fault, try changing the seed values. Repeated values are // improbable but not impossible. - for seed in xoroshiro128pp.seeds { + for seed in seeds { mut rng := &rand.PRNG(&XOROS128PPRNG{}) rng.seed(seed) - mut values := []u64{cap: xoroshiro128pp.value_count} - for i in 0 .. xoroshiro128pp.value_count { + mut values := []u64{cap: value_count} + for i in 0 .. value_count { value := rng.u64() assert value !in values assert values.len == i @@ -56,20 +56,20 @@ fn check_uniformity_u64(mut rng rand.PRNG, range u64) { range_f64 := f64(range) expected_mean := range_f64 / 2.0 mut variance := 0.0 - for _ in 0 .. xoroshiro128pp.sample_size { + for _ in 0 .. 
sample_size { diff := f64(rng.u64n(range) or { panic("Couldn't obtain u64") }) - expected_mean variance += diff * diff } - variance /= xoroshiro128pp.sample_size - 1 + variance /= sample_size - 1 sigma := math.sqrt(variance) - expected_sigma := range_f64 * xoroshiro128pp.inv_sqrt_12 + expected_sigma := range_f64 * inv_sqrt_12 error := (sigma - expected_sigma) / expected_sigma - assert math.abs(error) < xoroshiro128pp.stats_epsilon + assert math.abs(error) < stats_epsilon } fn test_xoroshiro128pp_uniformity_u64() { ranges := [14019545, 80240, 130] - for seed in xoroshiro128pp.seeds { + for seed in seeds { mut rng := &rand.PRNG(&XOROS128PPRNG{}) rng.seed(seed) for range in ranges { @@ -81,20 +81,20 @@ fn test_xoroshiro128pp_uniformity_u64() { fn check_uniformity_f64(mut rng rand.PRNG) { expected_mean := 0.5 mut variance := 0.0 - for _ in 0 .. xoroshiro128pp.sample_size { + for _ in 0 .. sample_size { diff := rng.f64() - expected_mean variance += diff * diff } - variance /= xoroshiro128pp.sample_size - 1 + variance /= sample_size - 1 sigma := math.sqrt(variance) - expected_sigma := xoroshiro128pp.inv_sqrt_12 + expected_sigma := inv_sqrt_12 error := (sigma - expected_sigma) / expected_sigma - assert math.abs(error) < xoroshiro128pp.stats_epsilon + assert math.abs(error) < stats_epsilon } fn test_xoroshiro128pp_uniformity_f64() { // The f64 version - for seed in xoroshiro128pp.seeds { + for seed in seeds { mut rng := &rand.PRNG(&XOROS128PPRNG{}) rng.seed(seed) check_uniformity_f64(mut rng) @@ -103,10 +103,10 @@ fn test_xoroshiro128pp_uniformity_f64() { fn test_xoroshiro128pp_u32n() { max := u32(16384) - for seed in xoroshiro128pp.seeds { + for seed in seeds { mut rng := &rand.PRNG(&XOROS128PPRNG{}) rng.seed(seed) - for _ in 0 .. xoroshiro128pp.range_limit { + for _ in 0 .. 
range_limit { value := rng.u32n(max) or { panic("Couldn't obtain u32") } assert value >= 0 assert value < max @@ -116,10 +116,10 @@ fn test_xoroshiro128pp_u32n() { fn test_xoroshiro128pp_u64n() { max := u64(379091181005) - for seed in xoroshiro128pp.seeds { + for seed in seeds { mut rng := &rand.PRNG(&XOROS128PPRNG{}) rng.seed(seed) - for _ in 0 .. xoroshiro128pp.range_limit { + for _ in 0 .. range_limit { value := rng.u64n(max) or { panic("Couldn't obtain u64") } assert value >= 0 assert value < max @@ -130,10 +130,10 @@ fn test_xoroshiro128pp_u64n() { fn test_xoroshiro128pp_u32_in_range() { max := u32(484468466) min := u32(316846) - for seed in xoroshiro128pp.seeds { + for seed in seeds { mut rng := &rand.PRNG(&XOROS128PPRNG{}) rng.seed(seed) - for _ in 0 .. xoroshiro128pp.range_limit { + for _ in 0 .. range_limit { value := rng.u32_in_range(u32(min), u32(max)) or { panic("Couldn't obtain u32 in range") } @@ -146,10 +146,10 @@ fn test_xoroshiro128pp_u32_in_range() { fn test_xoroshiro128pp_u64_in_range() { max := u64(216468454685163) min := u64(6848646868) - for seed in xoroshiro128pp.seeds { + for seed in seeds { mut rng := &rand.PRNG(&XOROS128PPRNG{}) rng.seed(seed) - for _ in 0 .. xoroshiro128pp.range_limit { + for _ in 0 .. range_limit { value := rng.u64_in_range(min, max) or { panic("Couldn't obtain u64 in range") } assert value >= min assert value < max @@ -160,10 +160,10 @@ fn test_xoroshiro128pp_u64_in_range() { fn test_xoroshiro128pp_int31() { max_u31 := int(0x7FFFFFFF) sign_mask := int(0x80000000) - for seed in xoroshiro128pp.seeds { + for seed in seeds { mut rng := &rand.PRNG(&XOROS128PPRNG{}) rng.seed(seed) - for _ in 0 .. xoroshiro128pp.range_limit { + for _ in 0 .. 
range_limit { value := rng.int31() assert value >= 0 assert value <= max_u31 @@ -176,10 +176,10 @@ fn test_xoroshiro128pp_int31() { fn test_xoroshiro128pp_int63() { max_u63 := i64(0x7FFFFFFFFFFFFFFF) sign_mask := i64(0x8000000000000000) - for seed in xoroshiro128pp.seeds { + for seed in seeds { mut rng := &rand.PRNG(&XOROS128PPRNG{}) rng.seed(seed) - for _ in 0 .. xoroshiro128pp.range_limit { + for _ in 0 .. range_limit { value := rng.int63() assert value >= 0 assert value <= max_u63 @@ -190,10 +190,10 @@ fn test_xoroshiro128pp_int63() { fn test_xoroshiro128pp_intn() { max := 2525642 - for seed in xoroshiro128pp.seeds { + for seed in seeds { mut rng := &rand.PRNG(&XOROS128PPRNG{}) rng.seed(seed) - for _ in 0 .. xoroshiro128pp.range_limit { + for _ in 0 .. range_limit { value := rng.intn(max) or { panic("Couldn't obtain int") } assert value >= 0 assert value < max @@ -203,10 +203,10 @@ fn test_xoroshiro128pp_intn() { fn test_xoroshiro128pp_i64n() { max := i64(3246727724653636) - for seed in xoroshiro128pp.seeds { + for seed in seeds { mut rng := &rand.PRNG(&XOROS128PPRNG{}) rng.seed(seed) - for _ in 0 .. xoroshiro128pp.range_limit { + for _ in 0 .. range_limit { value := rng.i64n(max) or { panic("Couldn't obtain i64") } assert value >= 0 assert value < max @@ -217,10 +217,10 @@ fn test_xoroshiro128pp_i64n() { fn test_xoroshiro128pp_int_in_range() { min := -4252 max := 1034 - for seed in xoroshiro128pp.seeds { + for seed in seeds { mut rng := &rand.PRNG(&XOROS128PPRNG{}) rng.seed(seed) - for _ in 0 .. xoroshiro128pp.range_limit { + for _ in 0 .. range_limit { value := rng.int_in_range(min, max) or { panic("Couldn't obtain int in range") } assert value >= min assert value < max @@ -231,10 +231,10 @@ fn test_xoroshiro128pp_int_in_range() { fn test_xoroshiro128pp_i64_in_range() { min := i64(-24095) max := i64(324058) - for seed in xoroshiro128pp.seeds { + for seed in seeds { mut rng := &rand.PRNG(&XOROS128PPRNG{}) rng.seed(seed) - for _ in 0 .. 
xoroshiro128pp.range_limit { + for _ in 0 .. range_limit { value := rng.i64_in_range(min, max) or { panic("Couldn't obtain i64 in range") } assert value >= min assert value < max @@ -243,10 +243,10 @@ fn test_xoroshiro128pp_i64_in_range() { } fn test_xoroshiro128pp_f32() { - for seed in xoroshiro128pp.seeds { + for seed in seeds { mut rng := &rand.PRNG(&XOROS128PPRNG{}) rng.seed(seed) - for _ in 0 .. xoroshiro128pp.range_limit { + for _ in 0 .. range_limit { value := rng.f32() assert value >= 0.0 assert value < 1.0 @@ -255,10 +255,10 @@ fn test_xoroshiro128pp_f32() { } fn test_xoroshiro128pp_f64() { - for seed in xoroshiro128pp.seeds { + for seed in seeds { mut rng := &rand.PRNG(&XOROS128PPRNG{}) rng.seed(seed) - for _ in 0 .. xoroshiro128pp.range_limit { + for _ in 0 .. range_limit { value := rng.f64() assert value >= 0.0 assert value < 1.0 @@ -268,10 +268,10 @@ fn test_xoroshiro128pp_f64() { fn test_xoroshiro128pp_f32n() { max := f32(357.0) - for seed in xoroshiro128pp.seeds { + for seed in seeds { mut rng := &rand.PRNG(&XOROS128PPRNG{}) rng.seed(seed) - for _ in 0 .. xoroshiro128pp.range_limit { + for _ in 0 .. range_limit { value := rng.f32n(max) or { panic("Couldn't obtain f32") } assert value >= 0.0 assert value < max @@ -281,10 +281,10 @@ fn test_xoroshiro128pp_f32n() { fn test_xoroshiro128pp_f64n() { max := 1.52e6 - for seed in xoroshiro128pp.seeds { + for seed in seeds { mut rng := &rand.PRNG(&XOROS128PPRNG{}) rng.seed(seed) - for _ in 0 .. xoroshiro128pp.range_limit { + for _ in 0 .. range_limit { value := rng.f64n(max) or { panic("Couldn't obtain f64") } assert value >= 0.0 assert value < max @@ -295,10 +295,10 @@ fn test_xoroshiro128pp_f64n() { fn test_xoroshiro128pp_f32_in_range() { min := f32(-24.0) max := f32(125.0) - for seed in xoroshiro128pp.seeds { + for seed in seeds { mut rng := &rand.PRNG(&XOROS128PPRNG{}) rng.seed(seed) - for _ in 0 .. xoroshiro128pp.range_limit { + for _ in 0 .. 
range_limit { value := rng.f32_in_range(min, max) or { panic("Couldn't obtain f32 in range") } assert value >= min assert value < max @@ -309,10 +309,10 @@ fn test_xoroshiro128pp_f32_in_range() { fn test_xoroshiro128pp_f64_in_range() { min := -548.7 max := 5015.2 - for seed in xoroshiro128pp.seeds { + for seed in seeds { mut rng := &rand.PRNG(&XOROS128PPRNG{}) rng.seed(seed) - for _ in 0 .. xoroshiro128pp.range_limit { + for _ in 0 .. range_limit { value := rng.f64_in_range(min, max) or { panic("Couldn't obtain f64 in range") } assert value >= min assert value < max diff --git a/vlib/regex/regex.v b/vlib/regex/regex.v index 62784bd19a5ccc..df7ebbd70b3ce4 100644 --- a/vlib/regex/regex.v +++ b/vlib/regex/regex.v @@ -85,7 +85,7 @@ fn utf8util_char_len(b u8) int { fn (re RE) get_char(in_txt string, i int) (u32, int) { ini := unsafe { in_txt.str[i] } // ascii 8 bit - if (re.flag & regex.f_bin) != 0 || ini & 0x80 == 0 { + if (re.flag & f_bin) != 0 || ini & 0x80 == 0 { return u32(ini), 1 } // unicode char @@ -103,7 +103,7 @@ fn (re RE) get_char(in_txt string, i int) (u32, int) { @[direct_array_access; inline] fn (re RE) get_charb(in_txt &u8, i int) (u32, int) { // ascii 8 bit - if (re.flag & regex.f_bin) != 0 || unsafe { in_txt[i] } & 0x80 == 0 { + if (re.flag & f_bin) != 0 || unsafe { in_txt[i] } & 0x80 == 0 { return u32(unsafe { in_txt[i] }), 1 } // unicode char @@ -144,7 +144,7 @@ fn is_not_alnum(in_char u8) bool { @[inline] fn is_space(in_char u8) bool { - return in_char in regex.spaces + return in_char in spaces } @[inline] @@ -189,20 +189,20 @@ fn is_upper(in_char u8) bool { pub fn (re RE) get_parse_error_string(err int) string { match err { - regex.compile_ok { return 'compile_ok' } - regex.no_match_found { return 'no_match_found' } - regex.err_char_unknown { return 'err_char_unknown' } - regex.err_undefined { return 'err_undefined' } - regex.err_internal_error { return 'err_internal_error' } - regex.err_cc_alloc_overflow { return 'err_cc_alloc_overflow' } - 
regex.err_syntax_error { return 'err_syntax_error' } - regex.err_groups_overflow { return 'err_groups_overflow' } - regex.err_groups_max_nested { return 'err_groups_max_nested' } - regex.err_group_not_balanced { return 'err_group_not_balanced' } - regex.err_group_qm_notation { return 'err_group_qm_notation' } - regex.err_invalid_or_with_cc { return 'err_invalid_or_with_cc' } - regex.err_neg_group_quantifier { return 'err_neg_group_quantifier' } - regex.err_consecutive_dots { return 'err_consecutive_dots' } + compile_ok { return 'compile_ok' } + no_match_found { return 'no_match_found' } + err_char_unknown { return 'err_char_unknown' } + err_undefined { return 'err_undefined' } + err_internal_error { return 'err_internal_error' } + err_cc_alloc_overflow { return 'err_cc_alloc_overflow' } + err_syntax_error { return 'err_syntax_error' } + err_groups_overflow { return 'err_groups_overflow' } + err_groups_max_nested { return 'err_groups_max_nested' } + err_group_not_balanced { return 'err_group_not_balanced' } + err_group_qm_notation { return 'err_group_qm_notation' } + err_invalid_or_with_cc { return 'err_invalid_or_with_cc' } + err_neg_group_quantifier { return 'err_neg_group_quantifier' } + err_consecutive_dots { return 'err_consecutive_dots' } else { return 'err_unknown' } } } @@ -424,7 +424,7 @@ fn (re RE) parse_bsls(in_txt string, in_i int) (int, int, u32) { // check if is our bsls char, for now only one length sequence if status == .bsls_found { - for c, x in regex.bsls_validator_array { + for c, x in bsls_validator_array { if x.ch == ch { return c, i - in_i + 1, hex_res } @@ -468,7 +468,7 @@ fn (re RE) parse_bsls(in_txt string, in_i int) (int, int, u32) { hex_res += u32(ch - `a` + 10) i += char_len } else { - return regex.err_syntax_error, i, hex_res + return err_syntax_error, i, hex_res } // println("hex_res: ${hex_res:08x} hex_count: ${hex_count}") @@ -480,7 +480,7 @@ fn (re RE) parse_bsls(in_txt string, in_i int) (int, int, u32) { // if over 8 nibble is more 
than 32 bit, error if hex_count > hex_max_len { - return regex.err_syntax_error, i - in_i, hex_res + return err_syntax_error, i - in_i, hex_res } if hex_count == hex_max_len { @@ -490,22 +490,22 @@ fn (re RE) parse_bsls(in_txt string, in_i int) (int, int, u32) { } // MUST NOT BE HERE! - return regex.err_syntax_error, i, hex_res + return err_syntax_error, i, hex_res } // no BSLS validator, manage as normal escape char char if status == .normal_char { - if ch in regex.bsls_escape_list { - return regex.no_match_found, i - in_i + 1, hex_res + if ch in bsls_escape_list { + return no_match_found, i - in_i + 1, hex_res } - return regex.err_syntax_error, i - in_i + 1, hex_res + return err_syntax_error, i - in_i + 1, hex_res } // at the present time we manage only one char after the \ break } // not our bsls return KO - return regex.err_syntax_error, i, hex_res + return err_syntax_error, i, hex_res } /****************************************************************************** @@ -546,8 +546,8 @@ fn (re RE) get_char_class(pc int) string { mut cc_i := re.prog[pc].cc_index mut i := 0 mut tmp := 0 - for cc_i >= 0 && cc_i < re.cc.len && re.cc[cc_i].cc_type != regex.cc_end { - if re.cc[cc_i].cc_type == regex.cc_bsls { + for cc_i >= 0 && cc_i < re.cc.len && re.cc[cc_i].cc_type != cc_end { + if re.cc[cc_i].cc_type == cc_bsls { unsafe { buf_ptr[i] = `\\` i++ @@ -604,8 +604,8 @@ fn (re RE) get_char_class(pc int) string { fn (re RE) check_char_class(pc int, ch rune) bool { mut cc_i := re.prog[pc].cc_index - for cc_i >= 0 && cc_i < re.cc.len && re.cc[cc_i].cc_type != regex.cc_end { - if re.cc[cc_i].cc_type == regex.cc_bsls { + for cc_i >= 0 && cc_i < re.cc.len && re.cc[cc_i].cc_type != cc_end { + if re.cc[cc_i].cc_type == cc_bsls { if re.cc[cc_i].validator(u8(ch)) { return true } @@ -625,12 +625,12 @@ fn (mut re RE) parse_char_class(in_txt string, in_i int) (int, int, u32) { mut tmp_index := re.cc_index res_index := re.cc_index - mut cc_type := u32(regex.ist_char_class_pos) + mut 
cc_type := u32(ist_char_class_pos) for i < in_txt.len { // check if we are out of memory for char classes if tmp_index >= re.cc.len { - return regex.err_cc_alloc_overflow, 0, u32(0) + return err_cc_alloc_overflow, 0, u32(0) } // get our char @@ -641,14 +641,14 @@ fn (mut re RE) parse_char_class(in_txt string, in_i int) (int, int, u32) { // negation if status == .start && ch == `^` { - cc_type = u32(regex.ist_char_class_neg) + cc_type = u32(ist_char_class_neg) i += char_len continue } // minus symbol if status == .start && ch == `-` { - re.cc[tmp_index].cc_type = regex.cc_char + re.cc[tmp_index].cc_type = cc_char re.cc[tmp_index].ch0 = char_tmp re.cc[tmp_index].ch1 = char_tmp i += char_len @@ -666,13 +666,13 @@ fn (mut re RE) parse_char_class(in_txt string, in_i int) (int, int, u32) { if status == .in_bsls { // println("CC bsls validation.") - for c, x in regex.bsls_validator_array { + for c, x in bsls_validator_array { if x.ch == ch { // println("CC bsls found [${ch:c}]") - re.cc[tmp_index].cc_type = regex.cc_bsls - re.cc[tmp_index].ch0 = regex.bsls_validator_array[c].ch - re.cc[tmp_index].ch1 = regex.bsls_validator_array[c].ch - re.cc[tmp_index].validator = regex.bsls_validator_array[c].validator + re.cc[tmp_index].cc_type = cc_bsls + re.cc[tmp_index].ch0 = bsls_validator_array[c].ch + re.cc[tmp_index].ch1 = bsls_validator_array[c].ch + re.cc[tmp_index].validator = bsls_validator_array[c].validator i += char_len tmp_index++ status = .in_char @@ -682,7 +682,7 @@ fn (mut re RE) parse_char_class(in_txt string, in_i int) (int, int, u32) { if status == .in_bsls { // manage as a simple char // println("CC bsls not found [${ch:c}]") - re.cc[tmp_index].cc_type = regex.cc_char + re.cc[tmp_index].cc_type = cc_char re.cc[tmp_index].ch0 = char_tmp re.cc[tmp_index].ch1 = char_tmp i += char_len @@ -698,7 +698,7 @@ fn (mut re RE) parse_char_class(in_txt string, in_i int) (int, int, u32) { if (status == .start || status == .in_char) && ch != `-` && ch != `]` { status = .in_char - 
re.cc[tmp_index].cc_type = regex.cc_char + re.cc[tmp_index].cc_type = cc_char re.cc[tmp_index].ch0 = char_tmp re.cc[tmp_index].ch1 = char_tmp @@ -717,7 +717,7 @@ fn (mut re RE) parse_char_class(in_txt string, in_i int) (int, int, u32) { // check range end if status == .separator && ch != `]` && ch != `-` { status = .in_char - re.cc[tmp_index - 1].cc_type = regex.cc_int + re.cc[tmp_index - 1].cc_type = cc_int re.cc[tmp_index - 1].ch1 = char_tmp i += char_len continue @@ -725,7 +725,7 @@ fn (mut re RE) parse_char_class(in_txt string, in_i int) (int, int, u32) { // char class end if status == .in_char && ch == `]` { - re.cc[tmp_index].cc_type = regex.cc_end + re.cc[tmp_index].cc_type = cc_end re.cc[tmp_index].ch0 = 0 re.cc[tmp_index].ch1 = 0 re.cc_index = tmp_index + 1 @@ -735,7 +735,7 @@ fn (mut re RE) parse_char_class(in_txt string, in_i int) (int, int, u32) { i++ } - return regex.err_syntax_error, 0, u32(0) + return err_syntax_error, 0, u32(0) } /****************************************************************************** @@ -774,7 +774,7 @@ fn (re RE) parse_quantifier(in_txt string, in_i int) (int, int, int, bool) { // exit on no compatible char with {} quantifier if utf8util_char_len(ch) != 1 { - return regex.err_syntax_error, i, 0, false + return err_syntax_error, i, 0, false } // min parsing skip if comma present @@ -816,7 +816,7 @@ fn (re RE) parse_quantifier(in_txt string, in_i int) (int, int, int, bool) { // end without max if status == .comma_checked && ch == `}` { - q_max = regex.max_quantifier + q_max = max_quantifier status = .greedy continue } @@ -865,11 +865,11 @@ fn (re RE) parse_quantifier(in_txt string, in_i int) (int, int, int, bool) { } // not a {} quantifier, exit - return regex.err_syntax_error, i, 0, false + return err_syntax_error, i, 0, false } // not a conform {} quantifier - return regex.err_syntax_error, i, 0, false + return err_syntax_error, i, 0, false } // @@ -1005,12 +1005,12 @@ fn (mut re RE) impl_compile(in_txt string) (int, int) { 
// check special cases: $ ^ // if char_len == 1 && i == 0 && u8(char_tmp) == `^` { - re.flag = regex.f_ms + re.flag = f_ms i = i + char_len continue } if char_len == 1 && i == (in_txt.len - 1) && u8(char_tmp) == `$` { - re.flag = regex.f_me + re.flag = f_me i = i + char_len continue } @@ -1019,13 +1019,13 @@ fn (mut re RE) impl_compile(in_txt string) (int, int) { if char_len == 1 && pc >= 0 && u8(char_tmp) == `(` { // check max groups allowed if group_count > re.group_max { - return regex.err_groups_overflow, i + 1 + return err_groups_overflow, i + 1 } group_stack_index++ // check max nested groups allowed if group_stack_index > re.group_max_nested { - return regex.err_groups_max_nested, i + 1 + return err_groups_max_nested, i + 1 } tmp_res, cgroup_flag, negate_flag, cgroup_name, next_i := re.parse_groups(in_txt, @@ -1033,7 +1033,7 @@ fn (mut re RE) impl_compile(in_txt string) (int, int) { // manage question mark format error if tmp_res < -1 { - return regex.err_group_qm_notation, next_i + return err_group_qm_notation, next_i } // println("Parse group: [$tmp_res, $cgroup_flag, ($i,$next_i), '${in_txt[i..next_i]}' ]") @@ -1060,7 +1060,7 @@ fn (mut re RE) impl_compile(in_txt string) (int, int) { group_stack_txt_index[group_stack_index] = i group_stack[group_stack_index] = pc - re.prog[pc].ist = u32(0) | regex.ist_group_start + re.prog[pc].ist = u32(0) | ist_group_start re.prog[pc].rep_min = 1 re.prog[pc].rep_max = 1 @@ -1085,13 +1085,13 @@ fn (mut re RE) impl_compile(in_txt string) (int, int) { // ist_group_end if char_len == 1 && pc > 0 && u8(char_tmp) == `)` { if group_stack_index < 0 { - return regex.err_group_not_balanced, i + 1 + return err_group_not_balanced, i + 1 } goto_pc := group_stack[group_stack_index] group_stack_index-- - re.prog[pc].ist = u32(0) | regex.ist_group_end + re.prog[pc].ist = u32(0) | ist_group_end re.prog[pc].rep_min = 1 re.prog[pc].rep_max = 1 @@ -1115,11 +1115,11 @@ fn (mut re RE) impl_compile(in_txt string) (int, int) { // ist_dot_char 
match any char except the following token if char_len == 1 && pc >= 0 && u8(char_tmp) == `.` { // consecutive ist_dot_char is a syntax error - if pc > 0 && re.prog[pc - 1].ist == regex.ist_dot_char { - return regex.err_consecutive_dots, i + if pc > 0 && re.prog[pc - 1].ist == ist_dot_char { + return err_consecutive_dots, i } - re.prog[pc].ist = u32(0) | regex.ist_dot_char + re.prog[pc].ist = u32(0) | ist_dot_char re.prog[pc].rep_min = 1 re.prog[pc].rep_max = 1 pc = pc + 1 @@ -1129,10 +1129,10 @@ fn (mut re RE) impl_compile(in_txt string) (int, int) { // OR branch if char_len == 1 && pc > 0 && u8(char_tmp) == `|` { - if pc > 0 && re.prog[pc - 1].ist == regex.ist_or_branch { - return regex.err_syntax_error, i + if pc > 0 && re.prog[pc - 1].ist == ist_or_branch { + return err_syntax_error, i } - re.prog[pc].ist = u32(0) | regex.ist_or_branch + re.prog[pc].ist = u32(0) | ist_or_branch re.prog[pc].source_index = i pc = pc + 1 i = i + char_len @@ -1150,15 +1150,15 @@ fn (mut re RE) impl_compile(in_txt string) (int, int) { // negation groups can not have quantifiers if re.prog[pc - 1].group_neg == true && char_tmp in [`?`, `+`, `*`, `{`] { - return regex.err_neg_group_quantifier, i + return err_neg_group_quantifier, i } match u8(char_tmp) { `?` { // println("q: ${char_tmp:c}") // check illegal quantifier sequences - if char_next_len == 1 && char_next in regex.quntifier_chars { - return regex.err_syntax_error, i + if char_next_len == 1 && char_next in quntifier_chars { + return err_syntax_error, i } re.prog[pc - 1].rep_min = 0 re.prog[pc - 1].rep_max = 1 @@ -1166,20 +1166,20 @@ fn (mut re RE) impl_compile(in_txt string) (int, int) { `+` { // println("q: ${char_tmp:c}") // check illegal quantifier sequences - if char_next_len == 1 && char_next in regex.quntifier_chars { - return regex.err_syntax_error, i + if char_next_len == 1 && char_next in quntifier_chars { + return err_syntax_error, i } re.prog[pc - 1].rep_min = 1 - re.prog[pc - 1].rep_max = regex.max_quantifier + 
re.prog[pc - 1].rep_max = max_quantifier } `*` { // println("q: ${char_tmp:c}") // check illegal quantifier sequences - if char_next_len == 1 && char_next in regex.quntifier_chars { - return regex.err_syntax_error, i + if char_next_len == 1 && char_next in quntifier_chars { + return err_syntax_error, i } re.prog[pc - 1].rep_min = 0 - re.prog[pc - 1].rep_max = regex.max_quantifier + re.prog[pc - 1].rep_max = max_quantifier } `{` { min, max, tmp, greedy := re.parse_quantifier(in_txt, i + 1) @@ -1193,8 +1193,8 @@ fn (mut re RE) impl_compile(in_txt string) (int, int) { // check illegal quantifier sequences if i <= in_txt.len { char_next, char_next_len = re.get_char(in_txt, i) - if char_next_len == 1 && char_next in regex.quntifier_chars { - return regex.err_syntax_error, i + if char_next_len == 1 && char_next in quntifier_chars { + return err_syntax_error, i } } continue @@ -1251,11 +1251,11 @@ fn (mut re RE) impl_compile(in_txt string) (int, int) { bsls_index, tmp, hex_res := re.parse_bsls(in_txt, i) if bsls_index >= 0 { i = i + tmp - re.prog[pc].ist = u32(0) | regex.ist_bsls_char + re.prog[pc].ist = u32(0) | ist_bsls_char re.prog[pc].rep_min = 1 re.prog[pc].rep_max = 1 - re.prog[pc].validator = regex.bsls_validator_array[bsls_index].validator - re.prog[pc].ch = regex.bsls_validator_array[bsls_index].ch + re.prog[pc].validator = bsls_validator_array[bsls_index].validator + re.prog[pc].ch = bsls_validator_array[bsls_index].ch pc = pc + 1 continue } @@ -1274,7 +1274,7 @@ fn (mut re RE) impl_compile(in_txt string) (int, int) { count-- for count >= 0 { - re.prog[pc].ist = regex.ist_simple_char + re.prog[pc].ist = ist_simple_char re.prog[pc].ch = value_list[count] re.prog[pc].ch_len = u8(char_len) re.prog[pc].rep_min = 1 @@ -1288,7 +1288,7 @@ fn (mut re RE) impl_compile(in_txt string) (int, int) { continue } // this is an escape char, skip the bsls and continue as a normal char - else if bsls_index == regex.no_match_found { + else if bsls_index == no_match_found { i += 
char_len char_tmp, char_len = re.get_char(in_txt, i) // continue as simple char @@ -1301,7 +1301,7 @@ fn (mut re RE) impl_compile(in_txt string) (int, int) { } // ist_simple_char - re.prog[pc].ist = regex.ist_simple_char + re.prog[pc].ist = ist_simple_char re.prog[pc].ch = char_tmp re.prog[pc].ch_len = u8(char_len) re.prog[pc].rep_min = 1 @@ -1313,17 +1313,17 @@ fn (mut re RE) impl_compile(in_txt string) (int, int) { } // add end of the program - re.prog[pc].ist = regex.ist_prog_end + re.prog[pc].ist = ist_prog_end re.prog_len = pc // check for unbalanced groups if group_stack_index != -1 { - return regex.err_group_not_balanced, group_stack_txt_index[group_stack_index] + 1 + return err_group_not_balanced, group_stack_txt_index[group_stack_index] + 1 } // check for OR at the end of the program - if pc > 0 && re.prog[pc - 1].ist == regex.ist_or_branch { - return regex.err_syntax_error, in_txt.len - 1 + if pc > 0 && re.prog[pc - 1].ist == ist_or_branch { + return err_syntax_error, in_txt.len - 1 } // store the number of groups in the query @@ -1342,18 +1342,17 @@ fn (mut re RE) impl_compile(in_txt string) (int, int) { mut dot_char_count := 0 mut last_dot_char_pc := -1 for pc1 < pc { - if re.prog[pc1].ist == regex.ist_dot_char { + if re.prog[pc1].ist == ist_dot_char { // println("Dot_char pc: $pc1") last_dot_char_pc = pc1 dot_char_count++ mut pc2 := pc1 + 1 for pc2 < pc { // consecutive dot chars is an error - if re.prog[pc2].ist == regex.ist_dot_char { - return regex.err_syntax_error, 0 + if re.prog[pc2].ist == ist_dot_char { + return err_syntax_error, 0 } - if re.prog[pc2].ist !in [u32(regex.ist_prog_end), regex.ist_group_end, - regex.ist_group_start] { + if re.prog[pc2].ist !in [u32(ist_prog_end), ist_group_end, ist_group_start] { // println("Next dot char check is PC: ${pc2}") re.prog[pc1].dot_check_pc = pc2 break @@ -1369,7 +1368,7 @@ fn (mut re RE) impl_compile(in_txt string) (int, int) { pc1 = last_dot_char_pc + 1 mut is_last_dot := true for pc1 < pc { - if 
re.prog[pc1].ist !in [u32(regex.ist_prog_end), regex.ist_group_end] { + if re.prog[pc1].ist !in [u32(ist_prog_end), ist_group_end] { is_last_dot = false break } @@ -1389,14 +1388,13 @@ fn (mut re RE) impl_compile(in_txt string) (int, int) { mut bsls_char_count := 0 mut last_bsls_char_pc := -1 for pc1 < pc { - if re.prog[pc1].ist == regex.ist_bsls_char { + if re.prog[pc1].ist == ist_bsls_char { // println("bsls_char pc: $pc1") last_bsls_char_pc = pc1 bsls_char_count++ mut pc2 := pc1 + 1 for pc2 < pc { - if re.prog[pc2].ist !in [u32(regex.ist_prog_end), regex.ist_group_end, - regex.ist_group_start] { + if re.prog[pc2].ist !in [u32(ist_prog_end), ist_group_end, ist_group_start] { // println("Next bsls check is PC: ${pc2}") re.prog[pc1].bsls_check_pc = pc2 break @@ -1412,7 +1410,7 @@ fn (mut re RE) impl_compile(in_txt string) (int, int) { pc1 = last_bsls_char_pc + 1 mut is_last_bsls := true for pc1 < pc { - if re.prog[pc1].ist !in [u32(regex.ist_prog_end), regex.ist_group_end] { + if re.prog[pc1].ist !in [u32(ist_prog_end), ist_group_end] { is_last_bsls = false break } @@ -1430,13 +1428,12 @@ fn (mut re RE) impl_compile(in_txt string) (int, int) { mut cc_char_count := 0 mut last_cc_char_pc := -1 for pc1 < pc { - if re.prog[pc1].ist in [u32(regex.ist_char_class_pos), regex.ist_char_class_neg] { + if re.prog[pc1].ist in [u32(ist_char_class_pos), ist_char_class_neg] { last_cc_char_pc = pc1 cc_char_count++ mut pc2 := pc1 + 1 for pc2 < pc { - if re.prog[pc2].ist !in [u32(regex.ist_prog_end), regex.ist_group_end, - regex.ist_group_start] { + if re.prog[pc2].ist !in [u32(ist_prog_end), ist_group_end, ist_group_start] { // println("Next CC check is PC: ${pc2}") re.prog[pc1].cc_check_pc = pc2 break @@ -1452,7 +1449,7 @@ fn (mut re RE) impl_compile(in_txt string) (int, int) { pc1 = last_cc_char_pc + 1 mut is_last_cc := true for pc1 < pc { - if re.prog[pc1].ist !in [u32(regex.ist_prog_end), regex.ist_group_end] { + if re.prog[pc1].ist !in [u32(ist_prog_end), ist_group_end] { 
is_last_cc = false break } @@ -1474,23 +1471,22 @@ fn (mut re RE) impl_compile(in_txt string) (int, int) { for pc1 < pc - 2 { // println("Here $pc1 ${pc-2}") // println("source index: ${pc1 + 1} => ${re.prog[pc1+1].source_index}") - if re.prog[pc1 + 1].ist == regex.ist_or_branch { + if re.prog[pc1 + 1].ist == ist_or_branch { // two consecutive OR are a syntax error - if re.prog[pc1 + 2].ist == regex.ist_or_branch { - return regex.err_syntax_error, i + if re.prog[pc1 + 2].ist == ist_or_branch { + return err_syntax_error, i } // check for []|[] errors - if re.prog[pc1].ist == regex.ist_char_class_pos - && re.prog[pc1 + 2].ist == regex.ist_char_class_pos { - return regex.err_invalid_or_with_cc, re.prog[pc1 + 1].source_index + if re.prog[pc1].ist == ist_char_class_pos && re.prog[pc1 + 2].ist == ist_char_class_pos { + return err_invalid_or_with_cc, re.prog[pc1 + 1].source_index } } // manange a|b chains like a|(b)|c|d... // standard solution - if re.prog[pc1].ist != regex.ist_or_branch && re.prog[pc1 + 1].ist == regex.ist_or_branch - && re.prog[pc1 + 2].ist != regex.ist_or_branch { + if re.prog[pc1].ist != ist_or_branch && re.prog[pc1 + 1].ist == ist_or_branch + && re.prog[pc1 + 2].ist != ist_or_branch { re.prog[pc1].next_is_or = true // set that the next token is an OR re.prog[pc1 + 1].rep_min = pc1 + 2 // failed match jump @@ -1498,11 +1494,11 @@ fn (mut re RE) impl_compile(in_txt string) (int, int) { mut pc2 := pc1 + 2 for pc2 < pc - 1 { ist := re.prog[pc2].ist - if ist == regex.ist_group_start { + if ist == ist_group_start { re.prog[pc1 + 1].rep_max = re.prog[pc2].goto_pc + 1 break } - if ist != regex.ist_or_branch { + if ist != ist_or_branch { re.prog[pc1 + 1].rep_max = pc2 + 1 break } @@ -1530,14 +1526,14 @@ fn (mut re RE) impl_compile(in_txt string) (int, int) { } //****************************************** - return regex.compile_ok, 0 + return compile_ok, 0 } // get_code return the compiled code as regex string, note: may be different from the source! 
pub fn (re RE) get_code() string { mut pc1 := 0 mut res := strings.new_builder(re.cc.len * 2 * re.prog.len) - res.write_string('========================================\nv RegEx compiler v ${regex.v_regex_version} output:\n') + res.write_string('========================================\nv RegEx compiler v ${v_regex_version} output:\n') mut stop_flag := false @@ -1549,32 +1545,32 @@ pub fn (re RE) get_code() string { res.write_string('${tk.ist:8x}'.replace(' ', '0')) res.write_string(' ') ist := tk.ist - if ist == regex.ist_bsls_char { + if ist == ist_bsls_char { res.write_string('[\\${tk.ch:1c}] BSLS') if tk.last_dot_flag == true { res.write_string(' last!') } - } else if ist == regex.ist_prog_end { + } else if ist == ist_prog_end { res.write_string('PROG_END') stop_flag = true - } else if ist == regex.ist_or_branch { + } else if ist == ist_or_branch { res.write_string('OR ') - } else if ist == regex.ist_char_class_pos { + } else if ist == ist_char_class_pos { res.write_string('[${re.get_char_class(pc1)}] CHAR_CLASS_POS') if tk.last_dot_flag == true { res.write_string(' last!') } - } else if ist == regex.ist_char_class_neg { + } else if ist == ist_char_class_neg { res.write_string('[^${re.get_char_class(pc1)}] CHAR_CLASS_NEG') if tk.last_dot_flag == true { res.write_string(' last!') } - } else if ist == regex.ist_dot_char { + } else if ist == ist_dot_char { res.write_string('. 
DOT_CHAR nx chk: ${tk.dot_check_pc}') if tk.last_dot_flag == true { res.write_string(' last!') } - } else if ist == regex.ist_group_start { + } else if ist == ist_group_start { res.write_string('( GROUP_START #:${tk.group_id}') if tk.group_id == -1 { res.write_string(' ?:') @@ -1586,9 +1582,9 @@ pub fn (re RE) get_code() string { } } } - } else if ist == regex.ist_group_end { + } else if ist == ist_group_end { res.write_string(') GROUP_END #:${tk.group_id}') - } else if ist == regex.ist_simple_char { + } else if ist == ist_simple_char { if tk.flag == 0 { res.write_string('[${tk.ch:1c}] query_ch') } else { @@ -1596,10 +1592,10 @@ pub fn (re RE) get_code() string { } } - if tk.rep_max == regex.max_quantifier { + if tk.rep_max == max_quantifier { res.write_string(' {${tk.rep_min:3d},MAX}') } else { - if ist == regex.ist_or_branch { + if ist == ist_or_branch { res.write_string(' if false go: ${tk.rep_min:3d} if true go: ${tk.rep_max:3d}') } else { res.write_string(' {${tk.rep_min:3d},${tk.rep_max:3d}}') @@ -1624,17 +1620,17 @@ pub fn (re RE) get_code() string { pub fn (re RE) get_query() string { mut res := strings.new_builder(re.query.len * 2) - if (re.flag & regex.f_ms) != 0 { + if (re.flag & f_ms) != 0 { res.write_string('^') } mut i := 0 - for i < re.prog.len && re.prog[i].ist != regex.ist_prog_end && re.prog[i].ist != 0 { + for i < re.prog.len && re.prog[i].ist != ist_prog_end && re.prog[i].ist != 0 { tk := unsafe { &re.prog[i] } ch := tk.ist // GROUP start - if ch == regex.ist_group_start { + if ch == ist_group_start { if re.debug > 0 { res.write_string('#${tk.group_id}') } @@ -1658,12 +1654,12 @@ pub fn (re RE) get_query() string { } // GROUP end - if ch == regex.ist_group_end { + if ch == ist_group_end { res.write_string(')') } // OR branch - if ch == regex.ist_or_branch { + if ch == ist_or_branch { res.write_string('|') if re.debug > 0 { res.write_string('{${tk.rep_min},${tk.rep_max}}') @@ -1673,9 +1669,9 @@ pub fn (re RE) get_query() string { } // char class 
- if ch == regex.ist_char_class_neg || ch == regex.ist_char_class_pos { + if ch == ist_char_class_neg || ch == ist_char_class_pos { res.write_string('[') - if ch == regex.ist_char_class_neg { + if ch == ist_char_class_neg { res.write_string('^') } res.write_string('${re.get_char_class(i)}') @@ -1683,19 +1679,19 @@ pub fn (re RE) get_query() string { } // bsls char - if ch == regex.ist_bsls_char { + if ch == ist_bsls_char { res.write_string('\\${tk.ch:1c}') } // ist_dot_char - if ch == regex.ist_dot_char { + if ch == ist_dot_char { res.write_string('.') } // char alone - if ch == regex.ist_simple_char { + if ch == ist_simple_char { if tk.flag == 0 { - if u8(ch) in regex.bsls_escape_list { + if u8(ch) in bsls_escape_list { res.write_string('\\') } res.write_string('${tk.ch:c}') @@ -1708,12 +1704,12 @@ pub fn (re RE) get_query() string { if !(tk.rep_min == 1 && tk.rep_max == 1) && tk.group_neg == false { if tk.rep_min == 0 && tk.rep_max == 1 { res.write_string('?') - } else if tk.rep_min == 1 && tk.rep_max == regex.max_quantifier { + } else if tk.rep_min == 1 && tk.rep_max == max_quantifier { res.write_string('+') - } else if tk.rep_min == 0 && tk.rep_max == regex.max_quantifier { + } else if tk.rep_min == 0 && tk.rep_max == max_quantifier { res.write_string('*') } else { - if tk.rep_max == regex.max_quantifier { + if tk.rep_max == max_quantifier { res.write_string('{${tk.rep_min},MAX}') } else { res.write_string('{${tk.rep_min},${tk.rep_max}}') @@ -1725,7 +1721,7 @@ pub fn (re RE) get_query() string { } i++ } - if (re.flag & regex.f_me) != 0 { + if (re.flag & f_me) != 0 { res.write_string('$') } @@ -1819,7 +1815,7 @@ pub mut: @[direct_array_access] pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { // result status - mut result := regex.no_match_found // function return + mut result := no_match_found // function return mut ch := rune(0) // examinated char mut char_len := 0 // utf8 examinated char len @@ -1851,7 +1847,7 @@ pub fn (mut re RE) 
match_base(in_txt &u8, in_txt_len int) (int, int) { ist = re.prog[state.pc].ist } else if state.pc >= re.prog.len { // println("ERROR!! PC overflow!!") - return regex.err_internal_error, state.i + return err_internal_error, state.i } //****************************************** @@ -1870,7 +1866,7 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { } else { // print only the exe instruction if (re.debug == 1 && m_state == .ist_load) || re.debug == 2 { - if ist == regex.ist_prog_end { + if ist == ist_prog_end { buf2.write_string('# ${step_count:3d} PROG_END\n') } else if ist == 0 || m_state in [.start, .ist_next, .stop] { buf2.write_string('# ${step_count:3d} s: ${state_str(m_state):12s} PC: NA\n') @@ -1881,34 +1877,34 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { buf2.write_string('${ist:8x}'.replace(' ', '0')) buf2.write_string(" i,ch,len:[${state.i:3d},'${utf8_str(ch)}',${char_len}] f.m:[${state.first_match:3d},${state.match_index:3d}] ") - if ist == regex.ist_simple_char { + if ist == ist_simple_char { if re.prog[state.pc].flag == 0 { buf2.write_string('query_ch: [${re.prog[state.pc].ch:1c}]') } else { buf2.write_string('query_ch: [0x${re.prog[state.pc].ch:02X}]') } } else { - if ist == regex.ist_bsls_char { + if ist == ist_bsls_char { buf2.write_string('BSLS [\\${re.prog[state.pc].ch:1c}]') - } else if ist == regex.ist_prog_end { + } else if ist == ist_prog_end { buf2.write_string('PROG_END') - } else if ist == regex.ist_or_branch { + } else if ist == ist_or_branch { buf2.write_string('OR') - } else if ist == regex.ist_char_class_pos { + } else if ist == ist_char_class_pos { buf2.write_string('CHAR_CLASS_POS[${re.get_char_class(state.pc)}]') - } else if ist == regex.ist_char_class_neg { + } else if ist == ist_char_class_neg { buf2.write_string('CHAR_CLASS_NEG[${re.get_char_class(state.pc)}]') - } else if ist == regex.ist_dot_char { + } else if ist == ist_dot_char { buf2.write_string('DOT_CHAR') - } else if ist == 
regex.ist_group_start { + } else if ist == ist_group_start { tmp_gi := re.prog[state.pc].group_id tmp_gr := re.prog[re.prog[state.pc].goto_pc].group_rep buf2.write_string('GROUP_START #:${tmp_gi} rep:${tmp_gr} ') - } else if ist == regex.ist_group_end { + } else if ist == ist_group_end { buf2.write_string('GROUP_END #:${re.prog[state.pc].group_id} deep:${state.group_index}') } } - if re.prog[state.pc].rep_max == regex.max_quantifier { + if re.prog[state.pc].rep_max == max_quantifier { buf2.write_string('{${re.prog[state.pc].rep_min},MAX}:${re.prog[state.pc].rep}') } else { buf2.write_string('{${re.prog[state.pc].rep_min},${re.prog[state.pc].rep_max}}:${re.prog[state.pc].rep}') @@ -1918,7 +1914,7 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { } buf2.write_string(' (#${state.group_index})') - if ist == regex.ist_dot_char { + if ist == ist_dot_char { buf2.write_string(' last!') } @@ -1933,7 +1929,7 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { } //****************************************** - if ist == regex.ist_prog_end { + if ist == ist_prog_end { // println("HERE we end!") break } @@ -1989,7 +1985,7 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { // println("re.groups: ${re.groups}") // the text is finished and the groups closed and we are the last group, ok exit - if ist == regex.ist_group_end && re.prog[state.pc + 1].ist == regex.ist_prog_end { + if ist == ist_group_end && re.prog[state.pc + 1].ist == ist_prog_end { // println("Last group end") return state.first_match, state.i } @@ -2003,16 +1999,16 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { // println("min_rep: ${re.prog[state.pc].rep_min} max_rep: ${re.prog[state.pc].rep_max} rep: ${re.prog[state.pc].rep}") // program end - if ist == regex.ist_prog_end { + if ist == ist_prog_end { // println("Program end on end of text!") return state.first_match, state.i } if l_ist in [ - 
u32(regex.ist_char_class_neg), - regex.ist_char_class_pos, - regex.ist_bsls_char, - regex.ist_dot_char, + u32(ist_char_class_neg), + ist_char_class_pos, + ist_bsls_char, + ist_dot_char, ] { // println("***** We have a last special token") // println("PC: ${state.pc} last_dot_flag:${re.prog[state.pc].last_dot_flag}") @@ -2027,7 +2023,7 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { // println("Not fitted!!") } // no groups open, check the last token quantifier - if ist != regex.ist_group_end && re.prog[state.pc + 1].ist == regex.ist_prog_end { + if ist != ist_group_end && re.prog[state.pc + 1].ist == ist_prog_end { if re.prog[state.pc].rep >= re.prog[state.pc].rep_min && re.prog[state.pc].rep <= re.prog[state.pc].rep_max { // println("We are in good repetition") @@ -2036,13 +2032,13 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { } // println("No good exit!!") - if re.prog[re.prog_len - 1].ist == regex.ist_group_end { + if re.prog[re.prog_len - 1].ist == ist_group_end { // println("last ist is a group end!") if re.prog[re.prog_len - 1].group_rep >= re.prog[re.prog_len - 1].rep_min { return state.first_match, state.i } } - return regex.no_match_found, state.i + return no_match_found, state.i } // starting and init @@ -2059,7 +2055,7 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { // check if we are in the program bounds if state.pc < 0 || state.pc > re.prog.len { // println("ERROR!! PC overflow!!") - return regex.err_internal_error, state.i + return err_internal_error, state.i } m_state = .ist_load continue @@ -2070,7 +2066,7 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { // check if we are in the program bounds if state.pc < 0 || state.pc > re.prog.len { // println("ERROR!! 
PC overflow!!") - return regex.err_internal_error, state.i + return err_internal_error, state.i } m_state = .ist_load continue @@ -2080,14 +2076,14 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { ch, char_len = re.get_charb(in_txt, state.i) // check new line if flag f_nl enabled - if (re.flag & regex.f_nl) != 0 && char_len == 1 && u8(ch) in regex.new_line_list { + if (re.flag & f_nl) != 0 && char_len == 1 && u8(ch) in new_line_list { m_state = .new_line continue } // check if stop else if m_state == .stop { // we are in search mode, don't exit until the end - if (re.flag & regex.f_src) != 0 && ist != regex.ist_prog_end { + if (re.flag & f_src) != 0 && ist != ist_prog_end { last_fnd_pc = state.pc state.pc = -1 state.i += char_len @@ -2103,7 +2099,7 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { continue } - if ist == regex.ist_prog_end { + if ist == ist_prog_end { return state.first_match, state.i } @@ -2114,7 +2110,7 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { state = re.state_list.pop() state.match_flag = true - l_ist = u32(regex.ist_dot_char) + l_ist = u32(ist_dot_char) if state.first_match < 0 { state.first_match = state.i @@ -2133,7 +2129,7 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { // ist_load else if m_state == .ist_load { // program end - if ist == regex.ist_prog_end { + if ist == ist_prog_end { // if we are in match exit well if state.group_index >= 0 && state.match_index >= 0 { @@ -2144,7 +2140,7 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { continue } // check GROUP start, no quantifier is checkd for this token!! 
- else if ist == regex.ist_group_start { + else if ist == ist_group_start { state.group_index++ re.group_data[state.group_index] = re.prog[state.pc].goto_pc // save where is ist_group_end, we will use it for escape re.group_stack[state.group_index] = state.i // index where we start to manage @@ -2154,7 +2150,7 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { continue } // check GROUP end - else if ist == regex.ist_group_end { + else if ist == ist_group_end { // we are in matching streak // println("Group END!! last ist: ${l_ist:08x}") if state.match_index >= 0 { @@ -2205,7 +2201,7 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { continue } // check OR - else if ist == regex.ist_or_branch { + else if ist == ist_or_branch { if state.match_index >= 0 { state.pc = re.prog[state.pc].rep_max // println("ist_or_branch True pc: $state.pc") @@ -2218,7 +2214,7 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { continue } // check ist_dot_char - else if ist == regex.ist_dot_char { + else if ist == ist_dot_char { // println("ist_dot_char rep: ${re.prog[state.pc].rep}") // check next token to be false @@ -2239,17 +2235,17 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { chk_pc := re.prog[state.pc].dot_check_pc // simple char - if re.prog[chk_pc].ist == regex.ist_simple_char { + if re.prog[chk_pc].ist == ist_simple_char { if re.prog[chk_pc].ch == ch_t { next_check_flag = true } // println("Check [ist_simple_char] [${re.prog[chk_pc].ch}]==[${ch_t:c}] => $next_check_flag") } // char char_class - else if re.prog[chk_pc].ist == regex.ist_char_class_pos - || re.prog[chk_pc].ist == regex.ist_char_class_neg { + else if re.prog[chk_pc].ist == ist_char_class_pos + || re.prog[chk_pc].ist == ist_char_class_neg { mut cc_neg := false - if re.prog[chk_pc].ist == regex.ist_char_class_neg { + if re.prog[chk_pc].ist == ist_char_class_neg { cc_neg = true } mut cc_res := re.check_char_class(chk_pc, 
ch_t) @@ -2261,14 +2257,14 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { // println("Check [ist_char_class] => $next_check_flag") } // check bsls - else if re.prog[chk_pc].ist == regex.ist_bsls_char { + else if re.prog[chk_pc].ist == ist_bsls_char { next_check_flag = re.prog[chk_pc].validator(u8(ch_t)) // println("Check [ist_bsls_char] => $next_check_flag") } } // check if we must continue or pass to the next IST - if next_check_flag == true && re.prog[state.pc + 1].ist != regex.ist_prog_end { + if next_check_flag == true && re.prog[state.pc + 1].ist != ist_prog_end { // println("save the state!!") mut dot_state := StateObj{ group_index: state.group_index @@ -2293,7 +2289,7 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { } state.match_flag = true - l_ist = u32(regex.ist_dot_char) + l_ist = u32(ist_dot_char) if state.first_match < 0 { state.first_match = state.i @@ -2306,7 +2302,7 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { continue } // char class IST - else if ist == regex.ist_char_class_pos || ist == regex.ist_char_class_neg { + else if ist == ist_char_class_pos || ist == ist_char_class_neg { // check next token to be false mut next_check_flag := false @@ -2325,17 +2321,17 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { chk_pc := re.prog[state.pc].cc_check_pc // simple char - if re.prog[chk_pc].ist == regex.ist_simple_char { + if re.prog[chk_pc].ist == ist_simple_char { if re.prog[chk_pc].ch == ch_t { next_check_flag = true } // println("Check [ist_simple_char] [${re.prog[chk_pc].ch}]==[${ch_t:c}] => $next_check_flag") } // char char_class - else if re.prog[chk_pc].ist == regex.ist_char_class_pos - || re.prog[chk_pc].ist == regex.ist_char_class_neg { + else if re.prog[chk_pc].ist == ist_char_class_pos + || re.prog[chk_pc].ist == ist_char_class_neg { mut cc_neg := false - if re.prog[chk_pc].ist == regex.ist_char_class_neg { + if re.prog[chk_pc].ist == 
ist_char_class_neg { cc_neg = true } mut cc_res := re.check_char_class(chk_pc, ch_t) @@ -2347,14 +2343,14 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { // println("Check [ist_char_class] => $next_check_flag") } // check bsls - else if re.prog[chk_pc].ist == regex.ist_bsls_char { + else if re.prog[chk_pc].ist == ist_bsls_char { next_check_flag = re.prog[chk_pc].validator(u8(ch_t)) // println("Check [ist_bsls_char] => $next_check_flag") } } // check if we must continue or pass to the next IST - if next_check_flag == true && re.prog[state.pc + 1].ist != regex.ist_prog_end { + if next_check_flag == true && re.prog[state.pc + 1].ist != ist_prog_end { // println("save the state!!") mut dot_state := StateObj{ group_index: state.group_index @@ -2383,7 +2379,7 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { state.match_flag = false mut cc_neg := false - if ist == regex.ist_char_class_neg { + if ist == ist_char_class_neg { cc_neg = true } mut cc_res := re.check_char_class(state.pc, ch) @@ -2400,7 +2396,7 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { if cc_res { state.match_flag = true - l_ist = u32(regex.ist_char_class_pos) + l_ist = u32(ist_char_class_pos) if state.first_match < 0 { state.first_match = state.i @@ -2417,7 +2413,7 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { continue } // check bsls - else if ist == regex.ist_bsls_char { + else if ist == ist_bsls_char { // println("ist_bsls_char rep: ${re.prog[state.pc].rep}") // check next token to be false @@ -2438,17 +2434,17 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { chk_pc := re.prog[state.pc].bsls_check_pc // simple char - if re.prog[chk_pc].ist == regex.ist_simple_char { + if re.prog[chk_pc].ist == ist_simple_char { if re.prog[chk_pc].ch == ch_t { next_check_flag = true } // println("Check [ist_simple_char] [${re.prog[chk_pc].ch}]==[${ch_t:c}] => $next_check_flag") } // char 
char_class - else if re.prog[chk_pc].ist == regex.ist_char_class_pos - || re.prog[chk_pc].ist == regex.ist_char_class_neg { + else if re.prog[chk_pc].ist == ist_char_class_pos + || re.prog[chk_pc].ist == ist_char_class_neg { mut cc_neg := false - if re.prog[chk_pc].ist == regex.ist_char_class_neg { + if re.prog[chk_pc].ist == ist_char_class_neg { cc_neg = true } mut cc_res := re.check_char_class(chk_pc, ch_t) @@ -2460,14 +2456,14 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { // println("Check [ist_char_class] => $next_check_flag") } // check bsls - else if re.prog[chk_pc].ist == regex.ist_bsls_char { + else if re.prog[chk_pc].ist == ist_bsls_char { next_check_flag = re.prog[chk_pc].validator(u8(ch_t)) // println("Check [ist_bsls_char] => $next_check_flag") } } // check if we must continue or pass to the next IST - if next_check_flag == true && re.prog[state.pc + 1].ist != regex.ist_prog_end { + if next_check_flag == true && re.prog[state.pc + 1].ist != ist_prog_end { // println("save the state!!") mut dot_state := StateObj{ group_index: state.group_index @@ -2499,7 +2495,7 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { // println("${ch} => ${tmp_res}") state.match_flag = true - l_ist = u32(regex.ist_dot_char) + l_ist = u32(ist_dot_char) if state.first_match < 0 { state.first_match = state.i @@ -2512,14 +2508,14 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { continue } // simple char IST - else if ist == regex.ist_simple_char { + else if ist == ist_simple_char { // println("ist_simple_char") state.match_flag = false if re.prog[state.pc].ch == ch && (state.i < in_txt_len - 1 || re.prog[state.pc].ch != 0) { state.match_flag = true - l_ist = regex.ist_simple_char + l_ist = ist_simple_char if state.first_match < 0 { state.first_match = state.i @@ -2537,7 +2533,7 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { } // UNREACHABLE // println("PANIC2!! 
state: $m_state") - return regex.err_internal_error, state.i + return err_internal_error, state.i } /*********************************** * Quantifier management @@ -2547,7 +2543,7 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { // we are finished here if state.group_index < 0 { // println("Early stop!") - result = regex.no_match_found + result = no_match_found m_state = .stop continue } @@ -2592,7 +2588,7 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { continue } - result = regex.no_match_found + result = no_match_found m_state = .stop continue } else if rep == 0 && rep < re.prog[tmp_pc].rep_min { @@ -2605,14 +2601,14 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { continue } - result = regex.no_match_found + result = no_match_found m_state = .stop continue } // println("DO NOT STAY HERE!! {${re.prog[tmp_pc].rep_min},${re.prog[tmp_pc].rep_max}}:$rep") // UNREACHABLE - return regex.err_internal_error, state.i + return err_internal_error, state.i } // ist_quant_pg => quantifier positive test on group else if m_state == .ist_quant_pg { @@ -2624,7 +2620,7 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { if re.prog[tmp_pc].group_neg == true { // println("***** Negation of the group") - result = regex.no_match_found + result = no_match_found m_state = .stop continue } @@ -2662,7 +2658,7 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { // UNREACHABLE // println("PANIC3!! 
state: $m_state") - return regex.err_internal_error, state.i + return err_internal_error, state.i } // ist_quant_n => quantifier negative test on token else if m_state == .ist_quant_n { @@ -2699,14 +2695,14 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { // no other options // println("ist_quant_n no_match_found") - result = regex.no_match_found + result = no_match_found m_state = .stop // stop already started matching outside a capturing group if re.state_list.len > 0 && re.state_list.last().group_index == -1 && re.state_list.last().last_dot_pc > 0 { - if ist == regex.ist_dot_char || ist == regex.ist_bsls_char { - return regex.no_match_found, 0 + if ist == ist_dot_char || ist == ist_bsls_char { + return no_match_found, 0 } } continue @@ -2715,7 +2711,7 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { else if m_state == .ist_quant_p { // println("Here .ist_quant_p") // exit on first match - if (re.flag & regex.f_efm) != 0 { + if (re.flag & f_efm) != 0 { return state.i, state.i + 1 } @@ -2749,17 +2745,17 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { } // UNREACHABLE // println("PANIC4!! 
state: $m_state") - return regex.err_internal_error, state.i + return err_internal_error, state.i } // println("Check end of text!") // Check the results if state.match_index >= 0 { if state.group_index < 0 { - if re.prog[state.pc].ist == regex.ist_prog_end { + if re.prog[state.pc].ist == ist_prog_end { // println("program ended!!") - if (re.flag & regex.f_src) != 0 { + if (re.flag & f_src) != 0 { // println("find return") return state.first_match, state.i } else { @@ -2770,22 +2766,21 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { // println("No Group here, natural end [$state.first_match,$state.i] state: ${state_str(m_state)} ist: $ist pgr_end: $re.prog.len") - if re.prog[state.pc + 1].ist == regex.ist_prog_end - || re.prog[state.pc].ist == regex.ist_prog_end { + if re.prog[state.pc + 1].ist == ist_prog_end || re.prog[state.pc].ist == ist_prog_end { rep := re.prog[state.pc].rep // println("rep: $rep re.prog[state.pc].rep_min: ${re.prog[state.pc].rep_min} re.prog[state.pc].rep_max: ${re.prog[state.pc].rep_max}") if rep >= re.prog[state.pc].rep_min && rep <= re.prog[state.pc].rep_max { return state.first_match, state.i } // println("Program not finished! ") - return regex.no_match_found, state.i + return no_match_found, state.i } if src_end { // println("program end") return state.first_match, state.i } // print("No match found!!") - return regex.no_match_found, state.i + return no_match_found, state.i } else { // println("Group match! 
OK") // println("first_match: $state.first_match, i: $state.i") @@ -2796,5 +2791,5 @@ pub fn (mut re RE) match_base(in_txt &u8, in_txt_len int) (int, int) { } } // println("no_match_found, natural end") - return regex.no_match_found, state.i + return no_match_found, state.i } diff --git a/vlib/semver/parse.v b/vlib/semver/parse.v index 0864ae61cb8c5b..5d28023cd927ae 100644 --- a/vlib/semver/parse.v +++ b/vlib/semver/parse.v @@ -41,9 +41,8 @@ fn (ver RawVersion) is_valid() bool { if ver.raw_ints.len != 3 { return false } - return is_valid_number(ver.raw_ints[semver.ver_major]) - && is_valid_number(ver.raw_ints[semver.ver_minor]) - && is_valid_number(ver.raw_ints[semver.ver_patch]) && is_valid_string(ver.prerelease) + return is_valid_number(ver.raw_ints[ver_major]) && is_valid_number(ver.raw_ints[ver_minor]) + && is_valid_number(ver.raw_ints[ver_patch]) && is_valid_string(ver.prerelease) && is_valid_string(ver.metadata) } @@ -53,7 +52,7 @@ fn (ver RawVersion) is_missing(typ int) bool { fn (raw_ver RawVersion) coerce() !Version { ver := raw_ver.complete() - if !is_valid_number(ver.raw_ints[semver.ver_major]) { + if !is_valid_number(ver.raw_ints[ver_major]) { return error('Invalid major version: ${ver.raw_ints}[ver_major]') } return ver.to_version() @@ -79,5 +78,5 @@ fn (raw_ver RawVersion) validate() ?Version { } fn (raw_ver RawVersion) to_version() Version { - return Version{raw_ver.raw_ints[semver.ver_major].int(), raw_ver.raw_ints[semver.ver_minor].int(), raw_ver.raw_ints[semver.ver_patch].int(), raw_ver.prerelease, raw_ver.metadata} + return Version{raw_ver.raw_ints[ver_major].int(), raw_ver.raw_ints[ver_minor].int(), raw_ver.raw_ints[ver_patch].int(), raw_ver.prerelease, raw_ver.metadata} } diff --git a/vlib/semver/range.v b/vlib/semver/range.v index 00d555a9ad2720..3eb489c6aa0736 100644 --- a/vlib/semver/range.v +++ b/vlib/semver/range.v @@ -55,7 +55,7 @@ fn (c Comparator) satisfies(ver Version) bool { } fn parse_range(input string) !Range { - 
raw_comparator_sets := input.split(semver.comparator_set_sep) + raw_comparator_sets := input.split(comparator_set_sep) mut comparator_sets := []ComparatorSet{} for raw_comp_set in raw_comparator_sets { if can_expand(raw_comp_set) { @@ -70,7 +70,7 @@ fn parse_range(input string) !Range { } fn parse_comparator_set(input string) !ComparatorSet { - raw_comparators := input.split(semver.comparator_sep) + raw_comparators := input.split(comparator_sep) if raw_comparators.len > 2 { return &InvalidComparatorFormatError{ msg: 'Invalid format of comparator set for input "${input}"' @@ -121,7 +121,7 @@ fn parse_comparator(input string) ?Comparator { fn parse_xrange(input string) ?Version { mut raw_ver := parse(input).complete() for typ in versions { - if raw_ver.raw_ints[typ].index_any(semver.x_range_symbols) == -1 { + if raw_ver.raw_ints[typ].index_any(x_range_symbols) == -1 { continue } match typ { @@ -144,8 +144,8 @@ fn parse_xrange(input string) ?Version { } fn can_expand(input string) bool { - return input[0] == `~` || input[0] == `^` || input.contains(semver.hyphen_range_sep) - || input.index_any(semver.x_range_symbols) > -1 + return input[0] == `~` || input[0] == `^` || input.contains(hyphen_range_sep) + || input.index_any(x_range_symbols) > -1 } fn expand_comparator_set(input string) ?ComparatorSet { @@ -154,7 +154,7 @@ fn expand_comparator_set(input string) ?ComparatorSet { `^` { return expand_caret(input[1..]) } else {} } - if input.contains(semver.hyphen_range_sep) { + if input.contains(hyphen_range_sep) { return expand_hyphen(input) } return expand_xrange(input) @@ -181,7 +181,7 @@ fn expand_caret(raw_version string) ?ComparatorSet { } fn expand_hyphen(raw_range string) ?ComparatorSet { - raw_versions := raw_range.split(semver.hyphen_range_sep) + raw_versions := raw_range.split(hyphen_range_sep) if raw_versions.len != 2 { return none } diff --git a/vlib/strconv/atof.c.v b/vlib/strconv/atof.c.v index 5587ec3aad12f9..c35aabbae6c7cd 100644 --- a/vlib/strconv/atof.c.v 
+++ b/vlib/strconv/atof.c.v @@ -95,7 +95,7 @@ fn sub96(s2 u32, s1 u32, s0 u32, d2 u32, d1 u32, d0 u32) (u32, u32, u32) { // Utility functions fn is_digit(x u8) bool { - return x >= strconv.c_zero && x <= strconv.c_nine + return x >= c_zero && x <= c_nine } fn is_space(x u8) bool { @@ -146,9 +146,9 @@ fn parser(s string) (ParserState, PrepNumber) { // read mantissa for i < s.len && s[i].is_digit() { // println("$i => ${s[i]}") - if digx < strconv.digits { + if digx < digits { pn.mantissa *= 10 - pn.mantissa += u64(s[i] - strconv.c_zero) + pn.mantissa += u64(s[i] - c_zero) digx++ } else if pn.exponent < 2147483647 { pn.exponent++ @@ -160,9 +160,9 @@ fn parser(s string) (ParserState, PrepNumber) { if i < s.len && s[i] == `.` { i++ for i < s.len && s[i].is_digit() { - if digx < strconv.digits { + if digx < digits { pn.mantissa *= 10 - pn.mantissa += u64(s[i] - strconv.c_zero) + pn.mantissa += u64(s[i] - c_zero) pn.exponent-- digx++ } @@ -175,9 +175,9 @@ fn parser(s string) (ParserState, PrepNumber) { i++ if i < s.len { // esponent sign - if s[i] == strconv.c_plus { + if s[i] == c_plus { i++ - } else if s[i] == strconv.c_minus { + } else if s[i] == c_minus { expneg = true i++ } @@ -185,7 +185,7 @@ fn parser(s string) (ParserState, PrepNumber) { for i < s.len && s[i].is_digit() { if expexp < 214748364 { expexp *= 10 - expexp += int(s[i] - strconv.c_zero) + expexp += int(s[i] - c_zero) } i++ } @@ -266,18 +266,18 @@ fn converter(mut pn PrepNumber) u64 { s1 = q1 s0 = q0 } - q2 = s2 / strconv.c_ten - r1 = s2 % strconv.c_ten + q2 = s2 / c_ten + r1 = s2 % c_ten r2 = (s1 >> 8) | (r1 << 24) - q1 = r2 / strconv.c_ten - r1 = r2 % strconv.c_ten + q1 = r2 / c_ten + r1 = r2 % c_ten r2 = ((s1 & u32(0xFF)) << 16) | (s0 >> 16) | (r1 << 24) - r0 = r2 / strconv.c_ten - r1 = r2 % strconv.c_ten + r0 = r2 / c_ten + r1 = r2 % c_ten q1 = (q1 << 8) | ((r0 & u32(0x00FF0000)) >> 16) q0 = r0 << 16 r2 = (s0 & u32(0xFFFF)) | (r1 << 16) - q0 |= r2 / strconv.c_ten + q0 |= r2 / c_ten s2 = q2 s1 = q1 s0 
= q0 @@ -365,15 +365,15 @@ fn converter(mut pn PrepNumber) u64 { binexp += 1023 if binexp > 2046 { if pn.negative { - result = strconv.double_minus_infinity + result = double_minus_infinity } else { - result = strconv.double_plus_infinity + result = double_plus_infinity } } else if binexp < 1 { if pn.negative { - result = strconv.double_minus_zero + result = double_minus_zero } else { - result = strconv.double_plus_zero + result = double_plus_zero } } else if s2 != 0 { mut q := u64(0) @@ -399,16 +399,16 @@ pub fn atof64(s string) !f64 { res.u = converter(mut pn) } .pzero { - res.u = strconv.double_plus_zero + res.u = double_plus_zero } .mzero { - res.u = strconv.double_minus_zero + res.u = double_minus_zero } .pinf { - res.u = strconv.double_plus_infinity + res.u = double_plus_infinity } .minf { - res.u = strconv.double_minus_infinity + res.u = double_minus_infinity } .invalid_number { return error('not a number') diff --git a/vlib/strconv/atofq.c.v b/vlib/strconv/atofq.c.v index 808c57acea06c6..1ad2e783a370b9 100644 --- a/vlib/strconv/atofq.c.v +++ b/vlib/strconv/atofq.c.v @@ -96,7 +96,7 @@ pub fn atof_quick(s string) f64 { i++ } if exp_sign == 1 { - if exp > strconv.pos_exp.len { + if exp > pos_exp.len { if sign > 0 { f.u = double_plus_infinity } else { @@ -105,12 +105,12 @@ pub fn atof_quick(s string) f64 { return unsafe { f.f } } tmp_mul := Float64u{ - u: strconv.pos_exp[exp] + u: pos_exp[exp] } // C.printf("exp: %d [0x%016llx] %f,",exp,pos_exp[exp],tmp_mul) f.f = unsafe { f.f * tmp_mul.f } } else { - if exp > strconv.neg_exp.len { + if exp > neg_exp.len { if sign > 0 { f.u = double_plus_zero } else { @@ -119,7 +119,7 @@ pub fn atof_quick(s string) f64 { return unsafe { f.f } } tmp_mul := Float64u{ - u: strconv.neg_exp[exp] + u: neg_exp[exp] } // C.printf("exp: %d [0x%016llx] %f,",exp,pos_exp[exp],tmp_mul) diff --git a/vlib/strconv/atoi.v b/vlib/strconv/atoi.v index e8350376311552..bd546a91b2d132 100644 --- a/vlib/strconv/atoi.v +++ b/vlib/strconv/atoi.v @@ 
-79,14 +79,14 @@ pub fn common_parse_uint2(s string, _base int, _bit_size int) (u64, int) { } if bit_size == 0 { - bit_size = strconv.int_size + bit_size = int_size } else if bit_size < 0 || bit_size > 64 { return u64(0), -2 } // Cutoff is the smallest number such that cutoff*base > maxUint64. // Use compile-time constants for common cases. - cutoff := strconv.max_u64 / u64(base) + u64(1) - max_val := if bit_size == 64 { strconv.max_u64 } else { (u64(1) << u64(bit_size)) - u64(1) } + cutoff := max_u64 / u64(base) + u64(1) + max_val := if bit_size == 64 { max_u64 } else { (u64(1) << u64(bit_size)) - u64(1) } basem1 := base - 1 mut n := u64(0) @@ -162,7 +162,7 @@ pub fn common_parse_int(_s string, base int, _bit_size int, error_on_non_digit b } mut bit_size := _bit_size if bit_size == 0 { - bit_size = strconv.int_size + bit_size = int_size } mut s := _s // Pick off leading sign. @@ -223,8 +223,8 @@ pub fn atoi(s string) !int { if s == '' { return error('strconv.atoi: parsing "": invalid syntax') } - if (strconv.int_size == 32 && (0 < s.len && s.len < 10)) - || (strconv.int_size == 64 && (0 < s.len && s.len < 19)) { + if (int_size == 32 && (0 < s.len && s.len < 10)) + || (int_size == 64 && (0 < s.len && s.len < 19)) { // Fast path for small integers that fit int type. 
mut start_idx := 0 if s[0] == `-` || s[0] == `+` { diff --git a/vlib/strconv/f32_str.c.v b/vlib/strconv/f32_str.c.v index e9dfde37868fa7..682d123cd563ee 100644 --- a/vlib/strconv/f32_str.c.v +++ b/vlib/strconv/f32_str.c.v @@ -79,8 +79,8 @@ pub fn (d Dec32) get_string_32(neg bool, i_n_digit int, i_pad_digit int) string if n_digit < out_len { // println("orig: ${out_len_original}") - out += strconv.ten_pow_table_32[out_len - n_digit - 1] * 5 // round to up - out /= strconv.ten_pow_table_32[out_len - n_digit] + out += ten_pow_table_32[out_len - n_digit - 1] * 5 // round to up + out /= ten_pow_table_32[out_len - n_digit] out_len = n_digit } @@ -147,11 +147,11 @@ pub fn (d Dec32) get_string_32(neg bool, i_n_digit int, i_pad_digit int) string fn f32_to_decimal_exact_int(i_mant u32, exp u32) (Dec32, bool) { mut d := Dec32{} - e := exp - strconv.bias32 - if e > strconv.mantbits32 { + e := exp - bias32 + if e > mantbits32 { return d, false } - shift := strconv.mantbits32 - e + shift := mantbits32 - e mant := i_mant | 0x0080_0000 // implicit 1 // mant := i_mant | (1 << mantbits32) // implicit 1 d.m = mant >> shift @@ -171,11 +171,11 @@ fn f32_to_decimal(mant u32, exp u32) Dec32 { if exp == 0 { // We subtract 2 so that the bounds computation has // 2 additional bits. 
- e2 = 1 - strconv.bias32 - int(strconv.mantbits32) - 2 + e2 = 1 - bias32 - int(mantbits32) - 2 m2 = mant } else { - e2 = int(exp) - strconv.bias32 - int(strconv.mantbits32) - 2 - m2 = (u32(1) << strconv.mantbits32) | mant + e2 = int(exp) - bias32 - int(mantbits32) - 2 + m2 = (u32(1) << mantbits32) | mant } even := (m2 & 1) == 0 accept_bounds := even @@ -323,14 +323,14 @@ pub fn f32_to_str(f f32, n_digit int) string { u1.f = f u := unsafe { u1.u } - neg := (u >> (strconv.mantbits32 + strconv.expbits32)) != 0 - mant := u & ((u32(1) << strconv.mantbits32) - u32(1)) - exp := (u >> strconv.mantbits32) & ((u32(1) << strconv.expbits32) - u32(1)) + neg := (u >> (mantbits32 + expbits32)) != 0 + mant := u & ((u32(1) << mantbits32) - u32(1)) + exp := (u >> mantbits32) & ((u32(1) << expbits32) - u32(1)) // println("${neg} ${mant} e ${exp-bias32}") // Exit early for easy cases. - if exp == strconv.maxexp32 || (exp == 0 && mant == 0) { + if exp == maxexp32 || (exp == 0 && mant == 0) { return get_string_special(neg, exp == 0, mant == 0) } @@ -350,14 +350,14 @@ pub fn f32_to_str_pad(f f32, n_digit int) string { u1.f = f u := unsafe { u1.u } - neg := (u >> (strconv.mantbits32 + strconv.expbits32)) != 0 - mant := u & ((u32(1) << strconv.mantbits32) - u32(1)) - exp := (u >> strconv.mantbits32) & ((u32(1) << strconv.expbits32) - u32(1)) + neg := (u >> (mantbits32 + expbits32)) != 0 + mant := u & ((u32(1) << mantbits32) - u32(1)) + exp := (u >> mantbits32) & ((u32(1) << expbits32) - u32(1)) // println("${neg} ${mant} e ${exp-bias32}") // Exit early for easy cases. 
- if exp == strconv.maxexp32 || (exp == 0 && mant == 0) { + if exp == maxexp32 || (exp == 0 && mant == 0) { return get_string_special(neg, exp == 0, mant == 0) } diff --git a/vlib/strconv/format_mem.c.v b/vlib/strconv/format_mem.c.v index ba0eb0984d7b55..89ae767ab7a216 100644 --- a/vlib/strconv/format_mem.c.v +++ b/vlib/strconv/format_mem.c.v @@ -105,12 +105,12 @@ pub fn format_dec_sb(d u64, p BF_param, mut res strings.Builder) { d_i = (n - (n1 * 100)) << 1 n = n1 unsafe { - buf[i] = strconv.digit_pairs.str[d_i] + buf[i] = digit_pairs.str[d_i] } i-- d_i++ unsafe { - buf[i] = strconv.digit_pairs.str[d_i] + buf[i] = digit_pairs.str[d_i] } i-- } @@ -328,8 +328,8 @@ pub fn format_fl(f f64, p BF_param) string { tmp.free() } - mut buf := [strconv.max_size_f64_char]u8{} // write temp float buffer in stack - mut out := [strconv.max_size_f64_char]u8{} // out buffer + mut buf := [max_size_f64_char]u8{} // write temp float buffer in stack + mut out := [max_size_f64_char]u8{} // out buffer mut buf_i := 0 // index temporary string mut out_i := 0 // index output string @@ -399,8 +399,8 @@ pub fn format_es(f f64, p BF_param) string { tmp.free() } - mut buf := [strconv.max_size_f64_char]u8{} // write temp float buffer in stack - mut out := [strconv.max_size_f64_char]u8{} // out buffer + mut buf := [max_size_f64_char]u8{} // write temp float buffer in stack + mut out := [max_size_f64_char]u8{} // out buffer mut buf_i := 0 // index temporary string mut out_i := 0 // index output string diff --git a/vlib/strconv/number_to_base.c.v b/vlib/strconv/number_to_base.c.v index 13e9868882dc0d..edea9727a915e5 100644 --- a/vlib/strconv/number_to_base.c.v +++ b/vlib/strconv/number_to_base.c.v @@ -23,7 +23,7 @@ pub fn format_int(n i64, radix int) string { for n_copy != 0 { tmp_0 := res bdx := int(n_copy % radix) - tmp_1 := strconv.base_digits[bdx].ascii_str() + tmp_1 := base_digits[bdx].ascii_str() res = tmp_1 + res tmp_0.free() tmp_1.free() @@ -55,7 +55,7 @@ pub fn format_uint(n u64, radix int) 
string { uradix := u64(radix) for n_copy != 0 { tmp_0 := res - tmp_1 := strconv.base_digits[n_copy % uradix].ascii_str() + tmp_1 := base_digits[n_copy % uradix].ascii_str() res = tmp_1 + res tmp_0.free() tmp_1.free() diff --git a/vlib/sync/channels.c.v b/vlib/sync/channels.c.v index f3abcbe83cfea6..f2c8141d914677 100644 --- a/vlib/sync/channels.c.v +++ b/vlib/sync/channels.c.v @@ -179,7 +179,7 @@ fn (mut ch Channel) try_push_priv(src voidptr, no_block bool) ChanState { if C.atomic_load_u16(&ch.closed) != 0 { return .closed } - spinloops_sem_, spinloops_ := if no_block { 1, 1 } else { sync.spinloops, sync.spinloops_sem } + spinloops_sem_, spinloops_ := if no_block { 1, 1 } else { spinloops, spinloops_sem } mut have_swapped := false for { mut got_sem := false @@ -367,7 +367,7 @@ pub fn (mut ch Channel) try_pop(dest voidptr) ChanState { } fn (mut ch Channel) try_pop_priv(dest voidptr, no_block bool) ChanState { - spinloops_sem_, spinloops_ := if no_block { 1, 1 } else { sync.spinloops, sync.spinloops_sem } + spinloops_sem_, spinloops_ := if no_block { 1, 1 } else { spinloops, spinloops_sem } mut have_swapped := false mut write_in_progress := false for { diff --git a/vlib/term/ui/color.v b/vlib/term/ui/color.v index 869860a93fd4f5..1abc63d8e39abf 100644 --- a/vlib/term/ui/color.v +++ b/vlib/term/ui/color.v @@ -30,9 +30,9 @@ fn init_color_table() []u32 { color_table_[15] = 0xffffff // color palette for i in 0 .. 216 { - r := ui.value_range[(i / 36) % 6] - g := ui.value_range[(i / 6) % 6] - b := ui.value_range[i % 6] + r := value_range[(i / 36) % 6] + g := value_range[(i / 6) % 6] + b := value_range[i % 6] color_table_[i + 16] = ((u32(r) << 16) & 0xffffff) + ((u32(g) << 8) & 0xffff) + (u32(b) & 0xff) } @@ -71,7 +71,7 @@ fn lookup_rgb(r int, g int, b int) int { color := (u32(r) << 16) + (u32(g) << 8) + u32(b) // lookup extended colors only, coz non-extended can be changed by users. for i in 16 .. 
256 { - if ui.color_table[i] == color { + if color_table[i] == color { return i } } diff --git a/vlib/term/ui/input_windows.c.v b/vlib/term/ui/input_windows.c.v index cd1ee853296a41..ff6e76e86168b9 100644 --- a/vlib/term/ui/input_windows.c.v +++ b/vlib/term/ui/input_windows.c.v @@ -136,7 +136,7 @@ fn (mut ctx Context) parse_events() { } // print('$nr_events | ') - if !C.ReadConsoleInput(ctx.stdin_handle, &ctx.read_buf[0], ui.buf_size, &nr_events) { + if !C.ReadConsoleInput(ctx.stdin_handle, &ctx.read_buf[0], buf_size, &nr_events) { panic('could not read from stdin') } for i in 0 .. nr_events { diff --git a/vlib/term/ui/termios_nix.c.v b/vlib/term/ui/termios_nix.c.v index f4f70a64012d4d..77c4f7fcb77af4 100644 --- a/vlib/term/ui/termios_nix.c.v +++ b/vlib/term/ui/termios_nix.c.v @@ -198,7 +198,7 @@ fn supports_truecolor() bool { fn termios_reset() { // C.TCSANOW ?? - mut startup := ui.termios_at_startup + mut startup := termios_at_startup termios.tcsetattr(C.STDIN_FILENO, C.TCSAFLUSH, mut startup) print('\x1b[?1003l\x1b[?1006l\x1b[?25h') flush_stdout() diff --git a/vlib/term/ui/ui.c.v b/vlib/term/ui/ui.c.v index 73c7b69b4c15e5..aa7e8790602a87 100644 --- a/vlib/term/ui/ui.c.v +++ b/vlib/term/ui/ui.c.v @@ -39,9 +39,9 @@ pub fn (mut ctx Context) flush() { if !ctx.enable_su { C.write(1, ctx.print_buf.data, ctx.print_buf.len) } else { - C.write(1, ui.bsu.str, ui.bsu.len) + C.write(1, bsu.str, bsu.len) C.write(1, ctx.print_buf.data, ctx.print_buf.len) - C.write(1, ui.esu.str, ui.esu.len) + C.write(1, esu.str, esu.len) } ctx.print_buf.clear() } diff --git a/vlib/time/duration.v b/vlib/time/duration.v index cfa58d06e72f71..2f6c35cb364d84 100644 --- a/vlib/time/duration.v +++ b/vlib/time/duration.v @@ -19,34 +19,34 @@ pub fn (d Duration) nanoseconds() i64 { // microseconds returns the duration as an integer number of microseconds. 
pub fn (d Duration) microseconds() i64 { - return i64(d) / time.microsecond + return i64(d) / microsecond } // milliseconds returns the duration as an integer number of milliseconds. pub fn (d Duration) milliseconds() i64 { - return i64(d) / time.millisecond + return i64(d) / millisecond } // The following functions return floating point numbers because it's common to // consider all of them in sub-one intervals // seconds returns the duration as a floating point number of seconds. pub fn (d Duration) seconds() f64 { - return f64(d) / f64(time.second) + return f64(d) / f64(second) } // minutes returns the duration as a floating point number of minutes. pub fn (d Duration) minutes() f64 { - return f64(d) / f64(time.minute) + return f64(d) / f64(minute) } // hours returns the duration as a floating point number of hours. pub fn (d Duration) hours() f64 { - return f64(d) / f64(time.hour) + return f64(d) / f64(hour) } // days returns the duration as a floating point number of days. pub fn (d Duration) days() f64 { - return f64(d) / f64(time.hour * 24) + return f64(d) / f64(hour * 24) } // str pretty prints the duration @@ -60,7 +60,7 @@ pub fn (d Duration) days() f64 { // ns // 234ns // ``` pub fn (d Duration) str() string { - if d == time.infinite { + if d == infinite { return 'inf' } mut sign := '' @@ -69,16 +69,16 @@ pub fn (d Duration) str() string { sign = '-' t = -t } - hr := t / time.hour - t -= hr * time.hour - min := t / time.minute - t -= min * time.minute - sec := t / time.second - t -= sec * time.second - ms := t / time.millisecond - t -= ms * time.millisecond - us := t / time.microsecond - t -= us * time.microsecond + hr := t / hour + t -= hr * hour + min := t / minute + t -= min * minute + sec := t / second + t -= sec * second + ms := t / millisecond + t -= ms * millisecond + us := t / microsecond + t -= us * microsecond ns := t return match true { @@ -101,12 +101,12 @@ pub fn (d Duration) debug() string { x = -x } for label, v in { - 'days': 24 * 
time.hour - 'h': time.hour - 'm': time.minute - 's': time.second - 'ms': time.millisecond - 'us': time.microsecond + 'days': 24 * hour + 'h': hour + 'm': minute + 's': second + 'ms': millisecond + 'us': microsecond } { if x > v { xx := x / v diff --git a/vlib/time/format.v b/vlib/time/format.v index ae42e6f34283fd..e4e759c510dc3e 100644 --- a/vlib/time/format.v +++ b/vlib/time/format.v @@ -344,9 +344,9 @@ pub fn (t Time) custom_format(s string) string { if i > s.len - j { continue } - if j == 1 || (j == 2 && s[i..i + j] in time.tokens_2) - || (j == 3 && s[i..i + j] in time.tokens_3) - || (j == 4 && s[i..i + j] in time.tokens_4) { + if j == 1 || (j == 2 && s[i..i + j] in tokens_2) + || (j == 3 && s[i..i + j] in tokens_3) + || (j == 4 && s[i..i + j] in tokens_4) { tokens << s[i..i + j] i += (j - 1) break diff --git a/vlib/time/misc/misc.v b/vlib/time/misc/misc.v index 76cb9f6b9e6376..e984468fbd10c1 100644 --- a/vlib/time/misc/misc.v +++ b/vlib/time/misc/misc.v @@ -7,5 +7,5 @@ const start_time_unix = time.now().unix() // random returns a random time struct in *the past*. pub fn random() time.Time { - return time.unix(int(rand.i64n(misc.start_time_unix) or { 0 })) + return time.unix(int(rand.i64n(start_time_unix) or { 0 })) } diff --git a/vlib/time/time.v b/vlib/time/time.v index 96ef330b4f24eb..981ad64b0be37f 100644 --- a/vlib/time/time.v +++ b/vlib/time/time.v @@ -101,7 +101,7 @@ pub fn (t Time) smonth() string { return '---' } i := t.month - 1 - return time.months_string[i * 3..(i + 1) * 3] + return months_string[i * 3..(i + 1) * 3] } // unix returns the UNIX time with second resolution. 
@@ -215,37 +215,37 @@ pub fn (t Time) relative() string { } else { suffix = ' ago' } - if secs < time.seconds_per_minute / 2 { + if secs < seconds_per_minute / 2 { return 'now' } - if secs < time.seconds_per_hour { - m := secs / time.seconds_per_minute + if secs < seconds_per_hour { + m := secs / seconds_per_minute if m == 1 { return '${prefix}1 minute${suffix}' } return '${prefix}${m} minutes${suffix}' } - if secs < time.seconds_per_hour * 24 { - h := secs / time.seconds_per_hour + if secs < seconds_per_hour * 24 { + h := secs / seconds_per_hour if h == 1 { return '${prefix}1 hour${suffix}' } return '${prefix}${h} hours${suffix}' } - if secs < time.seconds_per_hour * 24 * 7 { - d := secs / time.seconds_per_hour / 24 + if secs < seconds_per_hour * 24 * 7 { + d := secs / seconds_per_hour / 24 if d == 1 { return '${prefix}1 day${suffix}' } return '${prefix}${d} days${suffix}' } - if secs < time.seconds_per_hour * 24 * time.days_in_year { + if secs < seconds_per_hour * 24 * days_in_year { if prefix == 'in ' { return 'on ${t.md()}' } return 'last ${t.md()}' } - y := secs / time.seconds_per_hour / 24 / time.days_in_year + y := secs / seconds_per_hour / 24 / days_in_year if y == 1 { return '${prefix}1 year${suffix}' } @@ -277,31 +277,31 @@ pub fn (t Time) relative_short() string { } else { suffix = ' ago' } - if secs < time.seconds_per_minute / 2 { + if secs < seconds_per_minute / 2 { return 'now' } - if secs < time.seconds_per_hour { - m := secs / time.seconds_per_minute + if secs < seconds_per_hour { + m := secs / seconds_per_minute if m == 1 { return '${prefix}1m${suffix}' } return '${prefix}${m}m${suffix}' } - if secs < time.seconds_per_hour * 24 { - h := secs / time.seconds_per_hour + if secs < seconds_per_hour * 24 { + h := secs / seconds_per_hour if h == 1 { return '${prefix}1h${suffix}' } return '${prefix}${h}h${suffix}' } - if secs < time.seconds_per_hour * 24 * time.days_in_year { - d := secs / time.seconds_per_hour / 24 + if secs < seconds_per_hour * 24 * 
days_in_year { + d := secs / seconds_per_hour / 24 if d == 1 { return '${prefix}1d${suffix}' } return '${prefix}${d}d${suffix}' } - y := secs / time.seconds_per_hour / 24 / time.days_in_year + y := secs / seconds_per_hour / 24 / days_in_year if y == 1 { return '${prefix}1y${suffix}' } @@ -329,7 +329,7 @@ pub fn (t Time) day_of_week() int { // year_day returns the current day of the year as an integer. // See also #Time.custom_format . pub fn (t Time) year_day() int { - yday := t.day + time.days_before[t.month - 1] + yday := t.day + days_before[t.month - 1] if is_leap_year(t.year) && t.month > 2 { return yday + 1 } @@ -339,13 +339,13 @@ pub fn (t Time) year_day() int { // weekday_str returns the current day as a string 3 letter abbreviation. pub fn (t Time) weekday_str() string { i := t.day_of_week() - 1 - return time.long_days[i][0..3] + return long_days[i][0..3] } // long_weekday_str returns the current day as a string. pub fn (t Time) long_weekday_str() string { i := t.day_of_week() - 1 - return time.long_days[i] + return long_days[i] } // is_leap_year checks if a given a year is a leap year. 
@@ -359,7 +359,7 @@ pub fn days_in_month(month int, year int) !int { return error('Invalid month: ${month}') } extra := if month == 2 && is_leap_year(year) { 1 } else { 0 } - res := time.month_days[month - 1] + extra + res := month_days[month - 1] + extra return res } diff --git a/vlib/time/time_darwin.c.v b/vlib/time/time_darwin.c.v index e4deeef9001f07..422c7d8ebc37e3 100644 --- a/vlib/time/time_darwin.c.v +++ b/vlib/time/time_darwin.c.v @@ -35,12 +35,12 @@ fn init_time_base() C.mach_timebase_info_data_t { fn sys_mono_now_darwin() u64 { tm := C.mach_absolute_time() - if time.time_base.denom == 0 { + if time_base.denom == 0 { unsafe { - C.mach_timebase_info(&time.time_base) + C.mach_timebase_info(&time_base) } } - return (tm - time.start_time) * time.time_base.numer / time.time_base.denom + return (tm - start_time) * time_base.numer / time_base.denom } // Note: vpc_now_darwin is used by `v -profile` . @@ -48,12 +48,12 @@ fn sys_mono_now_darwin() u64 { @[inline] fn vpc_now_darwin() u64 { tm := C.mach_absolute_time() - if time.time_base.denom == 0 { + if time_base.denom == 0 { unsafe { - C.mach_timebase_info(&time.time_base) + C.mach_timebase_info(&time_base) } } - return (tm - time.start_time) * time.time_base.numer / time.time_base.denom + return (tm - start_time) * time_base.numer / time_base.denom } // darwin_now returns a better precision current time for macos diff --git a/vlib/time/time_windows.c.v b/vlib/time/time_windows.c.v index 7895efb47c6e4b..72c394a275cf1d 100644 --- a/vlib/time/time_windows.c.v +++ b/vlib/time/time_windows.c.v @@ -80,7 +80,7 @@ fn init_win_time_start() u64 { pub fn sys_mono_now() u64 { tm := u64(0) C.QueryPerformanceCounter(voidptr(&tm)) // XP or later never fail - return (tm - time.start_time) * 1000000000 / time.freq_time + return (tm - start_time) * 1000000000 / freq_time } // Note: vpc_now is used by `v -profile` . 
diff --git a/vlib/toml/checker/checker.v b/vlib/toml/checker/checker.v index e5789edb585bda..665c36085c958b 100644 --- a/vlib/toml/checker/checker.v +++ b/vlib/toml/checker/checker.v @@ -462,7 +462,7 @@ fn (c Checker) check_quoted_escapes(q ast.Quoted) ! { continue } } - if next_ch !in checker.allowed_basic_escape_chars { + if next_ch !in allowed_basic_escape_chars { st := s.state() return error(@MOD + '.' + @STRUCT + '.' + @FN + ' unknown basic string escape character `${next_ch.ascii_str()}` in `${escape}` (${st.line_nr},${st.col}) in ...${c.excerpt(q.pos)}...') @@ -507,7 +507,7 @@ fn (c Checker) check_utf8_validity(q ast.Quoted) ! { // Any preludes or prefixes like `0x` could pontentially yield wrong results. fn validate_utf8_codepoint_string(str string) ! { int_val := strconv.parse_int(str, 16, 64) or { i64(-1) } - if int_val > checker.utf8_max || int_val < 0 { + if int_val > utf8_max || int_val < 0 { return error('Unicode code point `${str}` is outside the valid Unicode scalar value ranges.') } // Check if the Unicode value is actually in the valid Unicode scalar value ranges. diff --git a/vlib/toml/decoder/decoder.v b/vlib/toml/decoder/decoder.v index dcc15cbc38eb40..3eeecc2e42f3c6 100644 --- a/vlib/toml/decoder/decoder.v +++ b/vlib/toml/decoder/decoder.v @@ -161,13 +161,13 @@ pub fn decode_quoted_escapes(mut q ast.Quoted) ! { decoded_s += escape continue } - if unicode_val > decoder.utf8_max || unicode_val < 0 { + if unicode_val > utf8_max || unicode_val < 0 { decoded_s += escape continue } // Check if the Unicode value is actually in the valid Unicode scalar value ranges. 
if !((unicode_val >= 0x0000 && unicode_val <= 0xD7FF) - || (unicode_val >= 0xE000 && unicode_val <= decoder.utf8_max)) { + || (unicode_val >= 0xE000 && unicode_val <= utf8_max)) { decoded_s += escape continue } diff --git a/vlib/toml/parser/parser.v b/vlib/toml/parser/parser.v index 8d3384843fbd93..6a5c3ba0d4a51c 100644 --- a/vlib/toml/parser/parser.v +++ b/vlib/toml/parser/parser.v @@ -186,7 +186,7 @@ fn (mut p Parser) check(check_token token.Kind) ! { // and return an error if the next token is not one of [.cr, .nl, .hash, .eof]. fn (mut p Parser) peek_for_correct_line_ending_or_fail() ! { // Disallow anything else than [.cr, .nl, .hash, .eof] after any space formatting. - peek_tok, _ := p.peek_over(1, parser.space_formatting)! + peek_tok, _ := p.peek_over(1, space_formatting)! if peek_tok.kind !in [.cr, .nl, .hash, .eof] { p.next()! // Forward to the peek_tok return error(@MOD + '.' + @STRUCT + '.' + @FN + @@ -438,15 +438,15 @@ pub fn (mut p Parser) dotted_key() !DottedKey { util.printdbg(@MOD + '.' + @STRUCT + '.' + @FN, 'parsing dotted key...') mut dotted_key := DottedKey([]string{}) key := p.key()! - p.ignore_while_peek(parser.space_formatting) + p.ignore_while_peek(space_formatting) dotted_key << key.str() for p.peek_tok.kind == .period { p.next()! // . p.check(.period)! - p.ignore_while(parser.space_formatting) + p.ignore_while(space_formatting) next_key := p.key()! dotted_key << next_key.text - p.ignore_while_peek(parser.space_formatting) + p.ignore_while_peek(space_formatting) } p.next()! util.printdbg(@MOD + '.' + @STRUCT + '.' + @FN, 'parsed dotted key `${dotted_key}` now at "${p.tok.kind}" "${p.tok.lit}"') @@ -478,7 +478,7 @@ pub fn (mut p Parser) root_table() ! { } .bare, .quoted, .number, .minus, .underscore { // Peek forward as far as we can skipping over space formatting tokens. - peek_tok, _ := p.peek_over(1, parser.keys_and_space_formatting)! + peek_tok, _ := p.peek_over(1, keys_and_space_formatting)! 
if peek_tok.kind == .period { dotted_key, val := p.dotted_key_value()! @@ -522,7 +522,7 @@ pub fn (mut p Parser) root_table() ! { t[key.str()] = val } } else { - p.ignore_while(parser.space_formatting) + p.ignore_while(space_formatting) key, val := p.key_value()! t := p.find_table()! @@ -543,8 +543,8 @@ pub fn (mut p Parser) root_table() ! { mut peek_tok := p.peek_tok // Disallow `[ [table]]` - if p.tok.kind in parser.space_formatting { - peek_tok, _ = p.peek_over(1, parser.space_formatting)! + if p.tok.kind in space_formatting { + peek_tok, _ = p.peek_over(1, space_formatting)! if peek_tok.kind == .lsbr { return error(@MOD + '.' + @STRUCT + '.' + @FN + ' unexpected "${p.tok.kind}" "${p.tok.lit}" at this (excerpt): "...${p.excerpt()}..."') @@ -552,10 +552,10 @@ pub fn (mut p Parser) root_table() ! { } // Allow `[ d.e.f]` - p.ignore_while(parser.space_formatting) + p.ignore_while(space_formatting) // Peek forward as far as we can skipping over space formatting tokens. - peek_tok, _ = p.peek_over(1, parser.keys_and_space_formatting)! + peek_tok, _ = p.peek_over(1, keys_and_space_formatting)! if p.tok.kind == .lsbr { // Parse `[[table]]` @@ -581,7 +581,7 @@ pub fn (mut p Parser) root_table() ! { // Disallow re-declaring the key p.check_explicitly_declared_array_of_tables(dotted_key)! p.check(.rsbr)! - p.ignore_while(parser.space_formatting) + p.ignore_while(space_formatting) arr := p.find_array_of_tables()! if val := arr[p.last_aot_index] { if val is map[string]ast.Value { @@ -618,7 +618,7 @@ pub fn (mut p Parser) root_table() ! { // ... also check implicitly declared keys p.check_implicitly_declared(dotted_key)! - p.ignore_while(parser.space_formatting) + p.ignore_while(space_formatting) util.printdbg(@MOD + '.' + @STRUCT + '.' + @FN, 'setting root map key to `${dotted_key}` at "${p.tok.kind}" "${p.tok.lit}"') p.root_map_key = dotted_key @@ -643,7 +643,7 @@ pub fn (mut p Parser) root_table() ! 
{ } // Allow [ key ] - p.ignore_while(parser.space_formatting) + p.ignore_while(space_formatting) util.printdbg(@MOD + '.' + @STRUCT + '.' + @FN, 'setting root map key to `${dotted_key}` at "${p.tok.kind}" "${p.tok.lit}"') p.root_map_key = dotted_key @@ -697,7 +697,7 @@ pub fn (mut p Parser) table_contents(mut tbl map[string]ast.Value) ! { } .bare, .quoted, .number, .minus, .underscore { // Peek forward as far as we can skipping over space formatting tokens. - peek_tok, _ := p.peek_over(1, parser.keys_and_space_formatting)! + peek_tok, _ := p.peek_over(1, keys_and_space_formatting)! if peek_tok.kind == .period { dotted_key, val := p.dotted_key_value()! @@ -710,7 +710,7 @@ pub fn (mut p Parser) table_contents(mut tbl map[string]ast.Value) ! { t[key.str()] = val } } else { - p.ignore_while(parser.space_formatting) + p.ignore_while(space_formatting) key, val := p.key_value()! unsafe { @@ -747,7 +747,7 @@ pub fn (mut p Parser) inline_table(mut tbl map[string]ast.Value) ! { util.printdbg(@MOD + '.' + @STRUCT + '.' + @FN, 'parsing token "${p.tok.kind}"') if previous_token_was_value { - p.ignore_while(parser.space_formatting) + p.ignore_while(space_formatting) if p.tok.kind != .rcbr { p.expect(.comma)! } @@ -765,7 +765,7 @@ pub fn (mut p Parser) inline_table(mut tbl map[string]ast.Value) ! { continue } .comma { - p.ignore_while_peek(parser.space_formatting) + p.ignore_while_peek(space_formatting) if p.peek_tok.kind in [.comma, .rcbr] { p.next()! // Forward to the peek_tok return error(@MOD + '.' + @STRUCT + '.' + @FN + @@ -780,7 +780,7 @@ pub fn (mut p Parser) inline_table(mut tbl map[string]ast.Value) ! { } .bare, .quoted, .number, .minus, .underscore { // Peek forward as far as we can skipping over space formatting tokens. - peek_tok, _ := p.peek_over(1, parser.space_formatting)! + peek_tok, _ := p.peek_over(1, space_formatting)! if peek_tok.kind == .period { dotted_key, val := p.dotted_key_value()! 
@@ -793,7 +793,7 @@ pub fn (mut p Parser) inline_table(mut tbl map[string]ast.Value) ! { t[key.str()] = val } } else { - p.ignore_while(parser.space_formatting) + p.ignore_while(space_formatting) key, val := p.key_value()! key_str := key.str() if _ := tbl[key_str] { @@ -824,9 +824,9 @@ pub fn (mut p Parser) array_of_tables(mut table map[string]ast.Value) ! { p.check(.lsbr)! // '[' bracket // Allow [[ key]] - p.ignore_while(parser.space_formatting) - peek_tok, _ := p.peek_over(1, parser.space_formatting)! - p.ignore_while(parser.space_formatting) + p.ignore_while(space_formatting) + peek_tok, _ := p.peek_over(1, space_formatting)! + p.ignore_while(space_formatting) // [[key.key]] horror if peek_tok.kind == .period { @@ -838,13 +838,13 @@ pub fn (mut p Parser) array_of_tables(mut table map[string]ast.Value) ! { p.next()! // Allow [[key ]] - p.ignore_while(parser.space_formatting) + p.ignore_while(space_formatting) p.check(.rsbr)! p.peek_for_correct_line_ending_or_fail()! p.expect(.rsbr)! - p.ignore_while(parser.all_formatting) + p.ignore_while(all_formatting) dotted_key := DottedKey([key.str()]) dotted_key_str := dotted_key.str() @@ -893,12 +893,12 @@ pub fn (mut p Parser) double_array_of_tables(mut table map[string]ast.Value) ! { util.printdbg(@MOD + '.' + @STRUCT + '.' + @FN, 'parsing nested array of tables "${p.tok.kind}" "${p.tok.lit}"') dotted_key := p.dotted_key()! - p.ignore_while(parser.space_formatting) + p.ignore_while(space_formatting) p.check(.rsbr)! p.expect(.rsbr)! - p.ignore_while(parser.all_formatting) + p.ignore_while(all_formatting) if dotted_key.len != 2 { return error(@MOD + '.' + @STRUCT + '.' + @FN + @@ -988,13 +988,13 @@ pub fn (mut p Parser) double_array_of_tables_contents(target_key DottedKey) ![]a for p.tok.kind != .eof { p.next()! util.printdbg(@MOD + '.' + @STRUCT + '.' 
+ @FN, 'parsing token "${p.tok.kind}"') - p.ignore_while(parser.all_formatting) + p.ignore_while(all_formatting) // Peek forward as far as we can skipping over space formatting tokens. - peek_tok, peeked_over = p.peek_over(1, parser.space_formatting)! + peek_tok, peeked_over = p.peek_over(1, space_formatting)! // Peek for occurrence of `[[` if peek_tok.kind == .lsbr { - peek_tok, peeked_over = p.peek_over(peeked_over + 1, parser.space_formatting)! + peek_tok, peeked_over = p.peek_over(peeked_over + 1, space_formatting)! if peek_tok.kind == .lsbr { mut arr := []ast.Value{} arr << tbl @@ -1005,7 +1005,7 @@ pub fn (mut p Parser) double_array_of_tables_contents(target_key DottedKey) ![]a match p.tok.kind { .bare, .quoted, .number, .minus, .underscore { // Peek forward as far as we can skipping over space formatting tokens. - peek_tok, _ = p.peek_over(1, parser.space_formatting)! + peek_tok, _ = p.peek_over(1, space_formatting)! if peek_tok.kind == .period { mut dotted_key, val := p.dotted_key_value()! @@ -1038,20 +1038,20 @@ pub fn (mut p Parser) double_array_of_tables_contents(target_key DottedKey) ![]a peek_tok = p.peek_tok // Allow `[ d.e.f]` - p.ignore_while(parser.space_formatting) + p.ignore_while(space_formatting) // Peek forward as far as we can skipping over space formatting tokens. - peek_tok, _ = p.peek_over(1, parser.space_formatting)! + peek_tok, _ = p.peek_over(1, space_formatting)! if peek_tok.kind == .period { // Parse `[d.e.f]` - p.ignore_while(parser.space_formatting) + p.ignore_while(space_formatting) dotted_key := p.dotted_key()! implicit_allocation_key = dotted_key if dotted_key.len > 2 { implicit_allocation_key = dotted_key[2..] } - p.ignore_while(parser.space_formatting) + p.ignore_while(space_formatting) util.printdbg(@MOD + '.' + @STRUCT + '.' + @FN, 'keys are: dotted `${dotted_key}`, target `${target_key}`, implicit `${implicit_allocation_key}` at "${p.tok.kind}" "${p.tok.lit}"') p.expect(.rsbr)! p.peek_for_correct_line_ending_or_fail()! 
@@ -1084,7 +1084,7 @@ pub fn (mut p Parser) array() ![]ast.Value { util.printdbg(@MOD + '.' + @STRUCT + '.' + @FN, 'parsing token "${p.tok.kind}" "${p.tok.lit}"') if previous_token_was_value { - p.ignore_while(parser.all_formatting) + p.ignore_while(all_formatting) if p.tok.kind != .rsbr && p.tok.kind != .hash { p.expect(.comma)! } @@ -1097,7 +1097,7 @@ pub fn (mut p Parser) array() ![]ast.Value { previous_token_was_value = true } .comma { - p.ignore_while_peek(parser.space_formatting) + p.ignore_while_peek(space_formatting) // Trailing commas before array close is allowed // so we do not do `if p.peek_tok.kind == .rsbr { ... }` @@ -1120,7 +1120,7 @@ pub fn (mut p Parser) array() ![]ast.Value { util.printdbg(@MOD + '.' + @STRUCT + '.' + @FN, 'skipping comment "${c.text}"') } .lcbr { - p.ignore_while(parser.space_formatting) + p.ignore_while(space_formatting) mut t := map[string]ast.Value{} p.inline_table(mut t)! arr << ast.Value(t) @@ -1178,7 +1178,7 @@ pub fn (mut p Parser) key() !ast.Key { pos := p.tok.pos() for p.peek_tok.kind != .assign && p.peek_tok.kind != .period && p.peek_tok.kind != .rsbr { p.next()! - if p.tok.kind !in parser.space_formatting { + if p.tok.kind !in space_formatting { lits += p.tok.lit } } @@ -1246,9 +1246,9 @@ pub fn (mut p Parser) key_value() !(ast.Key, ast.Value) { util.printdbg(@MOD + '.' + @STRUCT + '.' + @FN, 'parsing key value pair...') key := p.key()! p.next()! - p.ignore_while(parser.space_formatting) + p.ignore_while(space_formatting) p.check(.assign)! // Assignment operator - p.ignore_while(parser.space_formatting) + p.ignore_while(space_formatting) value := p.value()! util.printdbg(@MOD + '.' + @STRUCT + '.' + @FN, 'parsed key value pair. `${key} = ${value}`') @@ -1263,11 +1263,11 @@ pub fn (mut p Parser) key_value() !(ast.Key, ast.Value) { // see also `key()` and `value()` pub fn (mut p Parser) dotted_key_value() !(DottedKey, ast.Value) { util.printdbg(@MOD + '.' + @STRUCT + '.' 
+ @FN, 'parsing dotted key value pair...') - p.ignore_while(parser.space_formatting) + p.ignore_while(space_formatting) dotted_key := p.dotted_key()! - p.ignore_while(parser.space_formatting) + p.ignore_while(space_formatting) p.check(.assign)! - p.ignore_while(parser.space_formatting) + p.ignore_while(space_formatting) value := p.value()! util.printdbg(@MOD + '.' + @STRUCT + '.' + @FN, 'parsed dotted key value pair `${dotted_key} = ${value}`...') @@ -1297,7 +1297,7 @@ pub fn (mut p Parser) value() !ast.Value { ast.Value(p.array()!) } .lcbr { - p.ignore_while(parser.space_formatting) + p.ignore_while(space_formatting) mut t := map[string]ast.Value{} p.inline_table(mut t)! ast.Value(t) @@ -1341,7 +1341,7 @@ pub fn (mut p Parser) bare() !ast.Bare { mut lits := p.tok.lit pos := p.tok.pos() for p.peek_tok.kind != .assign && p.peek_tok.kind != .period && p.peek_tok.kind != .rsbr - && p.peek_tok.kind !in parser.space_formatting { + && p.peek_tok.kind !in space_formatting { p.next()! if p.tok.kind == .bare || p.tok.kind == .minus || p.tok.kind == .underscore { lits += p.tok.lit diff --git a/vlib/toml/scanner/scanner.v b/vlib/toml/scanner/scanner.v index d2670684020556..b0d35d89ece407 100644 --- a/vlib/toml/scanner/scanner.v +++ b/vlib/toml/scanner/scanner.v @@ -99,7 +99,7 @@ pub fn (mut s Scanner) scan() !token.Token { for { c := s.next() byte_c := u8(c) - if c == scanner.end_of_text { + if c == end_of_text { s.inc_line_number() util.printdbg(@MOD + '.' + @STRUCT + '.' + @FN, 'reached EOF') return s.new_token(.eof, '', 1) @@ -268,7 +268,7 @@ pub fn (mut s Scanner) next() u32 { c := s.text[opos] return c } - return scanner.end_of_text + return end_of_text } // skip skips one character ahead. 
@@ -300,7 +300,7 @@ pub fn (s &Scanner) at() u32 { if s.pos < s.text.len { return s.text[s.pos] } - return scanner.end_of_text + return end_of_text } // at_crlf returns `true` if the scanner is at a `\r` character @@ -321,7 +321,7 @@ pub fn (s &Scanner) peek(n int) u32 { } return s.text[s.pos + n] } - return scanner.end_of_text + return end_of_text } // reset resets the internal state of the scanner. @@ -355,7 +355,7 @@ fn (mut s Scanner) new_token(kind token.Kind, lit string, len int) token.Token { fn (mut s Scanner) ignore_line() !string { util.printdbg(@MOD + '.' + @STRUCT + '.' + @FN, ' ignoring until EOL...') start := s.pos - for c := s.at(); c != scanner.end_of_text && c != `\n`; c = s.at() { + for c := s.at(); c != end_of_text && c != `\n`; c = s.at() { util.printdbg(@MOD + '.' + @STRUCT + '.' + @FN, 'skipping "${u8(c).ascii_str()} / ${c}"') if s.at_crlf() { util.printdbg(@MOD + '.' + @STRUCT + '.' + @FN, 'letting `\\r\\n` slip through') @@ -510,7 +510,7 @@ fn (mut s Scanner) extract_multiline_string() !string { if c == quote { if s.peek(1) == quote && s.peek(2) == quote { - if s.peek(3) == scanner.end_of_text { + if s.peek(3) == end_of_text { s.pos += 3 s.col += 3 lit += quote.ascii_str() + quote.ascii_str() + quote.ascii_str() @@ -593,7 +593,7 @@ fn (mut s Scanner) extract_number() !string { mut float_precision := 0 if c == `.` { mut i := 1 - for c_ := u8(s.peek(i)); c_ != scanner.end_of_text && c_ != `\n`; c_ = u8(s.peek(i)) { + for c_ := u8(s.peek(i)); c_ != end_of_text && c_ != `\n`; c_ = u8(s.peek(i)) { if !c_.is_digit() && c_ != `,` { float_precision = 0 break @@ -610,7 +610,7 @@ fn (mut s Scanner) extract_number() !string { s.col += 2 } c = s.at() - if !(u8(c).is_hex_digit() || c in scanner.digit_extras) || (c == `.` && s.is_left_of_assign) { + if !(u8(c).is_hex_digit() || c in digit_extras) || (c == `.` && s.is_left_of_assign) { break } s.pos++ diff --git a/vlib/toml/toml.v b/vlib/toml/toml.v index 71db616893be22..51ffa45276cbdb 100644 --- 
a/vlib/toml/toml.v +++ b/vlib/toml/toml.v @@ -414,7 +414,7 @@ pub fn (d Doc) reflect[T]() T { // quoted keys are supported as `a."b.c"` or `a.'b.c'`. // Arrays can be queried with `a[0].b[1].[2]`. pub fn (d Doc) value(key string) Any { - key_split := parse_dotted_key(key) or { return toml.null } + key_split := parse_dotted_key(key) or { return null } return d.value_(d.ast.table, key_split) } @@ -434,21 +434,21 @@ pub fn (d Doc) value_opt(key string) !Any { // value_ returns the value found at `key` in the map `values` as `Any` type. fn (d Doc) value_(value ast.Value, key []string) Any { if key.len == 0 { - return toml.null + return null } mut ast_value := ast.Value(ast.Null{}) k, index := parse_array_key(key[0]) if k == '' { a := value as []ast.Value - ast_value = a[index] or { return toml.null } + ast_value = a[index] or { return null } } if value is map[string]ast.Value { - ast_value = value[k] or { return toml.null } + ast_value = value[k] or { return null } if index > -1 { a := ast_value as []ast.Value - ast_value = a[index] or { return toml.null } + ast_value = a[index] or { return null } } } @@ -526,11 +526,11 @@ pub fn ast_to_any(value ast.Value) Any { return aa } else { - return toml.null + return null } } - return toml.null + return null // TODO: decide this // panic(@MOD + '.' + @STRUCT + '.' + @FN + ' can\'t convert "$value"') // return Any('') diff --git a/vlib/v/ast/ast.v b/vlib/v/ast/ast.v index 51a6a7c99315ed..44833021c59bcd 100644 --- a/vlib/v/ast/ast.v +++ b/vlib/v/ast/ast.v @@ -2108,7 +2108,7 @@ const max_nested_expr_pos_calls = 5000 pub fn (expr Expr) pos() token.Pos { pos_calls := stdatomic.add_i64(&nested_expr_pos_calls, 1) - if pos_calls > ast.max_nested_expr_pos_calls { + if pos_calls > max_nested_expr_pos_calls { $if panic_on_deeply_nested_expr_pos_calls ? 
{ eprintln('${@LOCATION}: too many nested Expr.pos() calls: ${pos_calls}, expr type: ${expr.type_name()}') exit(1) @@ -2491,7 +2491,7 @@ pub fn all_registers(mut t Table, arch pref.Arch) map[string]ScopeObject { return all_registers(mut t, .amd64) } .amd64, .i386 { - for bit_size, array in ast.x86_no_number_register_list { + for bit_size, array in x86_no_number_register_list { for name in array { res[name] = AsmRegister{ name: name @@ -2500,7 +2500,7 @@ pub fn all_registers(mut t Table, arch pref.Arch) map[string]ScopeObject { } } } - for bit_size, array in ast.x86_with_number_register_list { + for bit_size, array in x86_with_number_register_list { for name, max_num in array { for i in 0 .. max_num { hash_index := name.index('#') or { @@ -2517,28 +2517,28 @@ pub fn all_registers(mut t Table, arch pref.Arch) map[string]ScopeObject { } } .arm32 { - arm32 := gen_all_registers(mut t, ast.arm_no_number_register_list, ast.arm_with_number_register_list, + arm32 := gen_all_registers(mut t, arm_no_number_register_list, arm_with_number_register_list, 32) for k, v in arm32 { res[k] = v } } .arm64 { - arm64 := gen_all_registers(mut t, ast.arm_no_number_register_list, ast.arm_with_number_register_list, + arm64 := gen_all_registers(mut t, arm_no_number_register_list, arm_with_number_register_list, 64) for k, v in arm64 { res[k] = v } } .rv32 { - rv32 := gen_all_registers(mut t, ast.riscv_no_number_register_list, ast.riscv_with_number_register_list, + rv32 := gen_all_registers(mut t, riscv_no_number_register_list, riscv_with_number_register_list, 32) for k, v in rv32 { res[k] = v } } .rv64 { - rv64 := gen_all_registers(mut t, ast.riscv_no_number_register_list, ast.riscv_with_number_register_list, + rv64 := gen_all_registers(mut t, riscv_no_number_register_list, riscv_with_number_register_list, 64) for k, v in rv64 { res[k] = v diff --git a/vlib/v/ast/comptime_valid_idents.v b/vlib/v/ast/comptime_valid_idents.v index 8d52d51dfef192..e7aa530ae8fa7c 100644 --- 
a/vlib/v/ast/comptime_valid_idents.v +++ b/vlib/v/ast/comptime_valid_idents.v @@ -16,10 +16,10 @@ pub const valid_comptime_compression_types = ['none', 'zlib'] fn all_valid_comptime_idents() []string { mut res := []string{} - res << ast.valid_comptime_if_os - res << ast.valid_comptime_if_compilers - res << ast.valid_comptime_if_platforms - res << ast.valid_comptime_if_cpu_features - res << ast.valid_comptime_if_other + res << valid_comptime_if_os + res << valid_comptime_if_compilers + res << valid_comptime_if_platforms + res << valid_comptime_if_cpu_features + res << valid_comptime_if_other return res } diff --git a/vlib/v/ast/str.v b/vlib/v/ast/str.v index fdbc6560dabab6..2e95bfe188db6e 100644 --- a/vlib/v/ast/str.v +++ b/vlib/v/ast/str.v @@ -397,7 +397,7 @@ const max_nested_expr_str_calls = 300 // string representation of expr pub fn (x &Expr) str() string { str_calls := stdatomic.add_i64(&nested_expr_str_calls, 1) - if str_calls > ast.max_nested_expr_str_calls { + if str_calls > max_nested_expr_str_calls { $if panic_on_deeply_nested_expr_str_calls ? { eprintln('${@LOCATION}: too many nested Expr.str() calls: ${str_calls}, expr type: ${x.type_name()}') exit(1) diff --git a/vlib/v/ast/table.v b/vlib/v/ast/table.v index e74f3afd3d71a3..17573128a40bea 100644 --- a/vlib/v/ast/table.v +++ b/vlib/v/ast/table.v @@ -656,7 +656,7 @@ pub fn (t &Table) find_sym_and_type_idx(name string) (&TypeSymbol, int) { if idx > 0 { return t.type_symbols[idx], idx } - return ast.invalid_type_symbol, idx + return invalid_type_symbol, idx } pub const invalid_type_symbol = &TypeSymbol{ @@ -683,7 +683,7 @@ pub fn (t &Table) sym(typ Type) &TypeSymbol { // this should never happen t.panic('table.sym: invalid type (typ=${typ} idx=${idx}). Compiler bug. This should never happen. Please report the bug using `v bug file.v`. 
') - return ast.invalid_type_symbol + return invalid_type_symbol } // final_sym follows aliases until it gets to a "real" Type @@ -699,7 +699,7 @@ pub fn (t &Table) final_sym(typ Type) &TypeSymbol { } // this should never happen t.panic('table.final_sym: invalid type (typ=${typ} idx=${idx}). Compiler bug. This should never happen. Please report the bug using `v bug file.v`.') - return ast.invalid_type_symbol + return invalid_type_symbol } @[inline] @@ -869,7 +869,7 @@ pub fn (t &Table) array_cname(elem_type Type) string { opt := if elem_type.has_flag(.option) { '_option_' } else { '' } res := if elem_type.has_flag(.result) { '_result_' } else { '' } if elem_type_sym.cname.contains('[') { - type_name := elem_type_sym.cname.replace_each(ast.map_cname_escape_seq) + type_name := elem_type_sym.cname.replace_each(map_cname_escape_seq) return 'Array_${opt}${res}${type_name}${suffix}' } else { return 'Array_${opt}${res}${elem_type_sym.cname}${suffix}' @@ -899,7 +899,7 @@ pub fn (t &Table) array_fixed_cname(elem_type Type, size int) string { opt := if elem_type.has_flag(.option) { '_option_' } else { '' } res := if elem_type.has_flag(.result) { '_result_' } else { '' } if elem_type_sym.cname.contains('[') { - type_name := elem_type_sym.cname.replace_each(ast.map_cname_escape_seq) + type_name := elem_type_sym.cname.replace_each(map_cname_escape_seq) return 'Array_fixed_${opt}${res}${type_name}${suffix}_${size}' } else { return 'Array_fixed_${opt}${res}${elem_type_sym.cname}${suffix}_${size}' @@ -928,7 +928,7 @@ pub fn (t &Table) chan_cname(elem_type Type, is_mut bool) string { suffix = '_ptr' } type_name := if elem_type_sym.cname.contains('[') { - elem_type_sym.cname.replace_each(ast.map_cname_escape_seq) + elem_type_sym.cname.replace_each(map_cname_escape_seq) } else { elem_type_sym.cname } @@ -1011,7 +1011,7 @@ pub fn (t &Table) map_cname(key_type Type, value_type Type) string { opt := if value_type.has_flag(.option) { '_option_' } else { '' } res := if 
value_type.has_flag(.result) { '_result_' } else { '' } if value_type_sym.cname.contains('[') { - type_name := value_type_sym.cname.replace_each(ast.map_cname_escape_seq) + type_name := value_type_sym.cname.replace_each(map_cname_escape_seq) return 'Map_${key_type_sym.cname}_${opt}${res}${type_name}${suffix}' } else { return 'Map_${key_type_sym.cname}_${opt}${res}${value_type_sym.cname}${suffix}' @@ -1199,7 +1199,7 @@ pub fn (mut t Table) find_or_register_fn_type(f Fn, is_anon bool, has_decl bool) cname := if f.name == '' { 'anon_fn_${t.fn_type_signature(f)}' } else { - util.no_dots(f.name.clone()).replace_each(ast.fn_type_escape_seq) + util.no_dots(f.name.clone()).replace_each(fn_type_escape_seq) } anon := f.name == '' || is_anon existing_idx := t.type_idxs[name] diff --git a/vlib/v/ast/types.v b/vlib/v/ast/types.v index af5d7b9e842ab9..18e228ff5752c0 100644 --- a/vlib/v/ast/types.v +++ b/vlib/v/ast/types.v @@ -274,11 +274,11 @@ pub mut: pub fn (t Type) atomic_typename() string { idx := t.idx() match idx { - ast.u32_type_idx { return 'atomic_uint' } - ast.int_type_idx { return '_Atomic int' } - ast.i32_type_idx { return '_Atomic int' } - ast.u64_type_idx { return 'atomic_ullong' } - ast.i64_type_idx { return 'atomic_llong' } + u32_type_idx { return 'atomic_uint' } + int_type_idx { return '_Atomic int' } + i32_type_idx { return '_Atomic int' } + u64_type_idx { return 'atomic_ullong' } + i64_type_idx { return 'atomic_llong' } else { return 'unknown_atomic' } } } @@ -300,13 +300,13 @@ pub fn (t Type) idx() int { // is_void return true if `t` is of type `void` @[inline] pub fn (t Type) is_void() bool { - return t == ast.void_type + return t == void_type } // is_full return true if `t` is not of type `void` @[inline] pub fn (t Type) is_full() bool { - return t != 0 && t != ast.void_type + return t != 0 && t != void_type } // return nr_muls for `t` @@ -327,19 +327,19 @@ pub fn (t Type) is_ptr() bool { @[inline] pub fn (typ Type) is_pointer() bool { // builtin pointer 
types (voidptr, byteptr, charptr) - return typ.idx() in ast.pointer_type_idxs + return typ.idx() in pointer_type_idxs } // is_voidptr returns true if `typ` is a voidptr @[inline] pub fn (typ Type) is_voidptr() bool { - return typ.idx() == ast.voidptr_type_idx + return typ.idx() == voidptr_type_idx } // is_any_kind_of_pointer returns true if t is any type of pointer @[inline] pub fn (t Type) is_any_kind_of_pointer() bool { - return (t >> 16) & 0xff != 0 || (u16(t) & 0xffff) in ast.pointer_type_idxs + return (t >> 16) & 0xff != 0 || (u16(t) & 0xffff) in pointer_type_idxs } // set nr_muls on `t` and return it @@ -513,64 +513,64 @@ pub fn new_type_ptr(idx int, nr_muls int) Type { // is_float returns `true` if `typ` is float @[inline] pub fn (typ Type) is_float() bool { - return !typ.is_ptr() && typ.idx() in ast.float_type_idxs + return !typ.is_ptr() && typ.idx() in float_type_idxs } // is_int returns `true` if `typ` is int @[inline] pub fn (typ Type) is_int() bool { - return !typ.is_ptr() && typ.idx() in ast.integer_type_idxs + return !typ.is_ptr() && typ.idx() in integer_type_idxs } // is_int_valptr returns `true` if `typ` is a pointer to a int @[inline] pub fn (typ Type) is_int_valptr() bool { - return typ.is_ptr() && typ.idx() in ast.integer_type_idxs + return typ.is_ptr() && typ.idx() in integer_type_idxs } // is_float_valptr return `true` if `typ` is a pointer to float @[inline] pub fn (typ Type) is_float_valptr() bool { - return typ.is_ptr() && typ.idx() in ast.float_type_idxs + return typ.is_ptr() && typ.idx() in float_type_idxs } // is_pure_int return `true` if `typ` is a pure int @[inline] pub fn (typ Type) is_pure_int() bool { - return int(typ) in ast.integer_type_idxs + return int(typ) in integer_type_idxs } // is_pure_float return `true` if `typ` is a pure float @[inline] pub fn (typ Type) is_pure_float() bool { - return int(typ) in ast.float_type_idxs + return int(typ) in float_type_idxs } // is_signed return `true` if `typ` is signed @[inline] pub fn (typ 
Type) is_signed() bool { - return typ.idx() in ast.signed_integer_type_idxs + return typ.idx() in signed_integer_type_idxs } // is_unsigned return `true` if `typ` is unsigned @[inline] pub fn (typ Type) is_unsigned() bool { - return typ.idx() in ast.unsigned_integer_type_idxs + return typ.idx() in unsigned_integer_type_idxs } pub fn (typ Type) flip_signedness() Type { return match typ { - ast.i8_type { ast.u8_type } - ast.i16_type { ast.u16_type } - ast.int_type { ast.u32_type } - ast.i32_type { ast.u32_type } - ast.isize_type { ast.usize_type } - ast.i64_type { ast.u64_type } - ast.u8_type { ast.i8_type } - ast.u16_type { ast.i16_type } - ast.u32_type { ast.int_type } - ast.usize_type { ast.isize_type } - ast.u64_type { ast.i64_type } + i8_type { u8_type } + i16_type { u16_type } + int_type { u32_type } + i32_type { u32_type } + isize_type { usize_type } + i64_type { u64_type } + u8_type { i8_type } + u16_type { i16_type } + u32_type { int_type } + usize_type { isize_type } + u64_type { i64_type } else { typ } } } @@ -578,25 +578,25 @@ pub fn (typ Type) flip_signedness() Type { // is_int_literal returns `true` if `typ` is a int literal @[inline] pub fn (typ Type) is_int_literal() bool { - return int(typ) == ast.int_literal_type_idx + return int(typ) == int_literal_type_idx } // is_number returns `true` if `typ` is a number @[inline] pub fn (typ Type) is_number() bool { - return typ.clear_flags() in ast.number_type_idxs + return typ.clear_flags() in number_type_idxs } // is_string returns `true` if `typ` is a string type @[inline] pub fn (typ Type) is_string() bool { - return typ.idx() == ast.string_type_idx + return typ.idx() == string_type_idx } // is_bool returns `true` if `typ` is of bool type @[inline] pub fn (typ Type) is_bool() bool { - return typ.idx() == ast.bool_type_idx + return typ.idx() == bool_type_idx } pub const invalid_type_idx = -1 @@ -699,15 +699,15 @@ pub const cptr_types = merge_types(voidptr_types, byteptr_types, charptr_types) pub const 
nil_type = new_type(nil_type_idx) fn new_charptr_types() []Type { - return [ast.charptr_type, new_type(ast.char_type_idx).set_nr_muls(1)] + return [charptr_type, new_type(char_type_idx).set_nr_muls(1)] } fn new_byteptr_types() []Type { - return [ast.byteptr_type, new_type(ast.u8_type_idx).set_nr_muls(1)] + return [byteptr_type, new_type(u8_type_idx).set_nr_muls(1)] } fn new_voidptr_types() []Type { - return [ast.voidptr_type, new_type(ast.voidptr_type_idx).set_nr_muls(1)] + return [voidptr_type, new_type(voidptr_type_idx).set_nr_muls(1)] } pub fn merge_types(params ...[]Type) []Type { @@ -724,8 +724,8 @@ pub fn merge_types(params ...[]Type) []Type { pub fn mktyp(typ Type) Type { return match typ { - ast.float_literal_type { ast.f64_type } - ast.int_literal_type { ast.int_type } + float_literal_type { f64_type } + int_literal_type { int_type } else { typ } } } @@ -1030,7 +1030,7 @@ pub fn (mut t Table) register_builtin_type_symbols() { cname: '__v_thread' mod: 'builtin' info: Thread{ - return_type: ast.void_type + return_type: void_type } ) // 29 t.register_sym(kind: .interface_, name: 'IError', cname: 'IError', mod: 'builtin') // 30 @@ -1085,7 +1085,7 @@ pub fn (t &TypeSymbol) is_builtin() bool { // type_size returns the size and alignment (in bytes) of `typ`, similarly to C's `sizeof()` and `alignof()`. 
pub fn (t &Table) type_size(typ Type) (int, int) { if typ.has_option_or_result() { - return t.type_size(ast.error_type_idx) + return t.type_size(error_type_idx) } if typ.nr_muls() > 0 { return t.pointer_size, t.pointer_size @@ -1265,7 +1265,7 @@ pub fn (t &Table) type_to_str(typ Type) string { // type name in code (for builtin) pub fn (t &Table) type_to_code(typ Type) string { match typ { - ast.int_literal_type, ast.float_literal_type { return t.sym(typ).kind.str() } + int_literal_type, float_literal_type { return t.sym(typ).kind.str() } else { return t.type_to_str_using_aliases(typ, map[string]string{}) } } } @@ -1341,7 +1341,7 @@ pub fn (t &Table) type_to_str_using_aliases(typ Type, import_aliases map[string] res = sym.kind.str() } .array { - if typ == ast.array_type { + if typ == array_type { res = 'array' return res } @@ -1397,7 +1397,7 @@ pub fn (t &Table) type_to_str_using_aliases(typ Type, import_aliases map[string] } } .map { - if int(typ) == ast.map_type_idx { + if int(typ) == map_type_idx { res = 'map' return res } @@ -1584,7 +1584,7 @@ pub fn (t &Table) fn_signature_using_aliases(func &Fn, import_aliases map[string } } sb.write_string(')') - if func.return_type != ast.void_type { + if func.return_type != void_type { sb.write_string(' ') sb.write_string(t.type_to_str_using_aliases(func.return_type, import_aliases)) } diff --git a/vlib/v/builder/cbuilder/parallel_cc.v b/vlib/v/builder/cbuilder/parallel_cc.v index 7d8172d0deda83..271522783ef247 100644 --- a/vlib/v/builder/cbuilder/parallel_cc.v +++ b/vlib/v/builder/cbuilder/parallel_cc.v @@ -60,8 +60,8 @@ fn parallel_cc(mut b builder.Builder, header string, res string, out_str string, pp.set_max_jobs(nthreads) pp.work_on_items(o_postfixes) eprintln('> C compilation on ${nthreads} threads, working on ${o_postfixes.len} files took: ${sw.elapsed().milliseconds()} ms') - link_cmd := '${os.quoted_path(cbuilder.cc_compiler)} -o ${os.quoted_path(b.pref.out_name)} out_0.o ${fnames.map(it.replace('.c', - 
'.o')).join(' ')} out_x.o -lpthread ${cbuilder.cc_ldflags}' + link_cmd := '${os.quoted_path(cc_compiler)} -o ${os.quoted_path(b.pref.out_name)} out_0.o ${fnames.map(it.replace('.c', + '.o')).join(' ')} out_x.o -lpthread ${cc_ldflags}' sw_link := time.new_stopwatch() link_res := os.execute(link_cmd) eprint_time('link_cmd', link_cmd, link_res, sw_link) @@ -70,7 +70,7 @@ fn parallel_cc(mut b builder.Builder, header string, res string, out_str string, fn build_parallel_o_cb(mut p pool.PoolProcessor, idx int, wid int) voidptr { postfix := p.get_item[string](idx) sw := time.new_stopwatch() - cmd := '${os.quoted_path(cbuilder.cc_compiler)} ${cbuilder.cc_cflags} -c -w -o out_${postfix}.o out_${postfix}.c' + cmd := '${os.quoted_path(cc_compiler)} ${cc_cflags} -c -w -o out_${postfix}.o out_${postfix}.c' res := os.execute(cmd) eprint_time('c cmd', cmd, res, sw) return unsafe { nil } diff --git a/vlib/v/builder/cc.v b/vlib/v/builder/cc.v index 9d65e4ee86e879..b252fc7bc0ee55 100644 --- a/vlib/v/builder/cc.v +++ b/vlib/v/builder/cc.v @@ -41,7 +41,7 @@ fn (mut v Builder) post_process_c_compiler_output(ccompiler string, res os.Resul } return } - for emsg_marker in [builder.c_verror_message_marker, 'error: include file '] { + for emsg_marker in [c_verror_message_marker, 'error: include file '] { if res.output.contains(emsg_marker) { emessage := res.output.all_after(emsg_marker).all_before('\n').all_before('\r').trim_right('\r\n') verror(emessage) @@ -315,8 +315,8 @@ fn (mut v Builder) setup_ccompiler_options(ccompiler string) { ccoptions.args << '-Wl,--export-all' ccoptions.args << '-Wl,--no-entry' } - if ccoptions.debug_mode && builder.current_os != 'windows' && v.pref.build_mode != .build_module { - if ccoptions.cc != .tcc && builder.current_os == 'macos' { + if ccoptions.debug_mode && current_os != 'windows' && v.pref.build_mode != .build_module { + if ccoptions.cc != .tcc && current_os == 'macos' { ccoptions.linker_flags << '-Wl,-export_dynamic' // clang for mac needs 
export_dynamic instead of -rdynamic } else { ccoptions.linker_flags << '-rdynamic' // needed for nicer symbolic backtraces @@ -438,9 +438,9 @@ fn (mut v Builder) setup_ccompiler_options(ccompiler string) { } if !v.pref.no_std { if v.pref.os == .linux { - ccoptions.source_args << '-std=${builder.c_std_gnu}' + ccoptions.source_args << '-std=${c_std_gnu}' } else { - ccoptions.source_args << '-std=${builder.c_std}' + ccoptions.source_args << '-std=${c_std}' } ccoptions.source_args << '-D_DEFAULT_SOURCE' } @@ -499,15 +499,15 @@ fn (v &Builder) thirdparty_object_args(ccoptions CcompilerOptions, middle []stri if !v.pref.no_std { if v.pref.os == .linux { if cpp_file { - all << '-std=${builder.cpp_std_gnu}' + all << '-std=${cpp_std_gnu}' } else { - all << '-std=${builder.c_std_gnu}' + all << '-std=${c_std_gnu}' } } else { if cpp_file { - all << '-std=${builder.cpp_std}' + all << '-std=${cpp_std}' } else { - all << '-std=${builder.c_std}' + all << '-std=${c_std}' } } all << '-D_DEFAULT_SOURCE' @@ -1001,8 +1001,8 @@ fn (mut c Builder) cc_windows_cross() { } else { args << cflags.c_options_after_target() } - if builder.current_os !in ['macos', 'linux', 'termux'] { - println(builder.current_os) + if current_os !in ['macos', 'linux', 'termux'] { + println(current_os) panic('your platform is not supported yet') } diff --git a/vlib/v/builder/msvc_windows.v b/vlib/v/builder/msvc_windows.v index 805b49f5975383..ad4e2b2a525b25 100644 --- a/vlib/v/builder/msvc_windows.v +++ b/vlib/v/builder/msvc_windows.v @@ -85,7 +85,7 @@ fn find_windows_kit_root_by_reg(target_arch string) !WindowsKit { $if windows { root_key := RegKey(0) path := 'SOFTWARE\\Microsoft\\Windows Kits\\Installed Roots' - rc := C.RegOpenKeyEx(builder.hkey_local_machine, path.to_wide(), 0, builder.key_query_value | builder.key_wow64_32key | builder.key_enumerate_sub_keys, + rc := C.RegOpenKeyEx(hkey_local_machine, path.to_wide(), 0, key_query_value | key_wow64_32key | key_enumerate_sub_keys, voidptr(&root_key)) if rc != 0 
{ @@ -447,7 +447,7 @@ fn (mut v Builder) build_thirdparty_obj_file_with_msvc(mod string, path string, // Instead of failing, just retry several times in this case. mut res := os.Result{} mut i := 0 - for i = 0; i < builder.thirdparty_obj_build_max_retries; i++ { + for i = 0; i < thirdparty_obj_build_max_retries; i++ { res = os.execute(cmd) if res.exit_code == 0 { break @@ -456,15 +456,15 @@ fn (mut v Builder) build_thirdparty_obj_file_with_msvc(mod string, path string, break } eprintln('---------------------------------------------------------------------') - eprintln(' msvc: failed to build a thirdparty object, try: ${i}/${builder.thirdparty_obj_build_max_retries}') + eprintln(' msvc: failed to build a thirdparty object, try: ${i}/${thirdparty_obj_build_max_retries}') eprintln(' cmd: ${cmd}') eprintln(' output:') eprintln(res.output) eprintln('---------------------------------------------------------------------') - time.sleep(builder.thirdparty_obj_build_retry_delay) + time.sleep(thirdparty_obj_build_retry_delay) } if res.exit_code != 0 { - verror('msvc: failed to build a thirdparty object after ${i}/${builder.thirdparty_obj_build_max_retries} retries, cmd: ${cmd}') + verror('msvc: failed to build a thirdparty object after ${i}/${thirdparty_obj_build_max_retries} retries, cmd: ${cmd}') } println(res.output) flush_stdout() diff --git a/vlib/v/cflag/cflags.v b/vlib/v/cflag/cflags.v index 3dff9e72fefa4f..772649d6b1d83f 100644 --- a/vlib/v/cflag/cflags.v +++ b/vlib/v/cflag/cflags.v @@ -30,9 +30,9 @@ pub fn (cf &CFlag) eval() string { x := cf.value[i] if x == `$` { remainder := cf.value[i..] 
- if remainder.starts_with(cflag.fexisting_literal) { - sparams := remainder[cflag.fexisting_literal.len + 1..].all_before(')') - i += sparams.len + cflag.fexisting_literal.len + 1 + if remainder.starts_with(fexisting_literal) { + sparams := remainder[fexisting_literal.len + 1..].all_before(')') + i += sparams.len + fexisting_literal.len + 1 svalues := sparams.replace(',', '\n').split_into_lines().map(it.trim('\t \'"')) // mut found_spath := '' for spath in svalues { diff --git a/vlib/v/checker/checker.v b/vlib/v/checker/checker.v index 4d662307f49fb4..4d7c70c3208e8f 100644 --- a/vlib/v/checker/checker.v +++ b/vlib/v/checker/checker.v @@ -361,7 +361,7 @@ pub fn (mut c Checker) check_files(ast_files []&ast.File) { // is needed when the generic type is auto inferred from the call argument. // we may have to loop several times, if there were more concrete types found. mut post_process_generic_fns_iterations := 0 - post_process_iterations_loop: for post_process_generic_fns_iterations <= checker.generic_fn_postprocess_iterations_cutoff_limit { + post_process_iterations_loop: for post_process_generic_fns_iterations <= generic_fn_postprocess_iterations_cutoff_limit { $if trace_post_process_generic_fns_loop ? 
{ eprintln('>>>>>>>>> recheck_generic_fns loop iteration: ${post_process_generic_fns_iterations}') } @@ -755,7 +755,7 @@ and use a reference to the sum type instead: `var := &${node.name}(${variant_nam fn (mut c Checker) expand_iface_embeds(idecl &ast.InterfaceDecl, level int, iface_embeds []ast.InterfaceEmbedding) []ast.InterfaceEmbedding { // eprintln('> expand_iface_embeds: idecl.name: $idecl.name | level: $level | iface_embeds.len: $iface_embeds.len') - if level > checker.iface_level_cutoff_limit { + if level > iface_level_cutoff_limit { c.error('too many interface embedding levels: ${level}, for interface `${idecl.name}`', idecl.pos) return [] @@ -1738,7 +1738,7 @@ fn (mut c Checker) const_decl(mut node ast.ConstDecl) { node.pos) } for mut field in node.fields { - if checker.reserved_type_names_chk.matches(util.no_cur_mod(field.name, c.mod)) { + if reserved_type_names_chk.matches(util.no_cur_mod(field.name, c.mod)) { c.error('invalid use of reserved type `${field.name}` as a const name', field.pos) } // TODO: Check const name once the syntax is decided @@ -2660,7 +2660,7 @@ fn (mut c Checker) stmts_ending_with_expression(mut stmts []ast.Stmt, expected_o c.scope_returns = false return } - if c.stmt_level > checker.stmt_level_cutoff_limit { + if c.stmt_level > stmt_level_cutoff_limit { c.scope_returns = false c.error('checker: too many stmt levels: ${c.stmt_level} ', stmts[0].pos) return @@ -2726,7 +2726,7 @@ pub fn (mut c Checker) expr(mut node ast.Expr) ast.Type { c.expr_level-- } - if c.expr_level > checker.expr_level_cutoff_limit { + if c.expr_level > expr_level_cutoff_limit { c.error('checker: too many expr levels: ${c.expr_level} ', node.pos()) return ast.void_type } @@ -4931,7 +4931,7 @@ fn (mut c Checker) ensure_generic_type_specify_type_names(typ ast.Type, pos toke defer { c.ensure_generic_type_level-- } - if c.ensure_generic_type_level > checker.expr_level_cutoff_limit { + if c.ensure_generic_type_level > expr_level_cutoff_limit { c.error('checker: too 
many levels of Checker.ensure_generic_type_specify_type_names calls: ${c.ensure_generic_type_level} ', pos) return false @@ -5015,7 +5015,7 @@ fn (mut c Checker) ensure_type_exists(typ ast.Type, pos token.Pos) bool { defer { c.type_level-- } - if c.type_level > checker.type_level_cutoff_limit { + if c.type_level > type_level_cutoff_limit { c.error('checker: too many levels of Checker.ensure_type_exists calls: ${c.type_level}, probably due to a self referencing type', pos) return false diff --git a/vlib/v/checker/containers.v b/vlib/v/checker/containers.v index c26dddca98d54f..fb7a25a5539faa 100644 --- a/vlib/v/checker/containers.v +++ b/vlib/v/checker/containers.v @@ -666,16 +666,16 @@ fn (mut c Checker) check_elements_initialized(typ ast.Type) ! { } if typ.is_any_kind_of_pointer() { if !c.pref.translated && !c.file.is_translated { - return checker.err_ref_uninitialized + return err_ref_uninitialized } else { return } } sym := c.table.sym(typ) if sym.kind == .interface_ { - return checker.err_interface_uninitialized + return err_interface_uninitialized } else if sym.kind == .sum_type { - return checker.err_sumtype_uninitialized + return err_sumtype_uninitialized } match sym.info { diff --git a/vlib/v/checker/fn.v b/vlib/v/checker/fn.v index 504ddf4fab6568..ce7b61c6c6df82 100644 --- a/vlib/v/checker/fn.v +++ b/vlib/v/checker/fn.v @@ -1051,7 +1051,7 @@ fn (mut c Checker) fn_call(mut node ast.CallExpr, mut continue_check &bool) ast. } } if is_native_builtin { - if node.args.len > 0 && fn_name in checker.print_everything_fns { + if node.args.len > 0 && fn_name in print_everything_fns { c.builtin_args(mut node, fn_name, func) return func.return_type } @@ -1218,7 +1218,7 @@ fn (mut c Checker) fn_call(mut node ast.CallExpr, mut continue_check &bool) ast. 
c.check_expected_arg_count(mut node, func) or { return func.return_type } } // println / eprintln / panic can print anything - if node.args.len > 0 && fn_name in checker.print_everything_fns { + if node.args.len > 0 && fn_name in print_everything_fns { c.builtin_args(mut node, fn_name, func) return func.return_type } @@ -2814,7 +2814,7 @@ fn (mut c Checker) check_expected_arg_count(mut node ast.CallExpr, f &ast.Fn) ! // check if multi-return is used as unique argument to the function if node.args.len == 1 && mut node.args[0].expr is ast.CallExpr { is_multi := node.args[0].expr.nr_ret_values > 1 - if is_multi && node.name !in checker.print_everything_fns { + if is_multi && node.name !in print_everything_fns { // it is a multi-return argument nr_args = node.args[0].expr.nr_ret_values if nr_args != nr_params { diff --git a/vlib/v/checker/str.v b/vlib/v/checker/str.v index 45e5554434990d..c196f2056f9351 100644 --- a/vlib/v/checker/str.v +++ b/vlib/v/checker/str.v @@ -160,13 +160,13 @@ fn (mut c Checker) string_lit(mut node ast.StringLiteral) ast.Type { first_digit := node.val[idx - 5] - 48 second_digit := node.val[idx - 4] - 48 if first_digit > 1 { - c.error(checker.unicode_lit_overflow_message, end_pos) + c.error(unicode_lit_overflow_message, end_pos) } else if first_digit == 1 && second_digit > 0 { - c.error(checker.unicode_lit_overflow_message, end_pos) + c.error(unicode_lit_overflow_message, end_pos) } } else { - c.error(checker.unicode_lit_overflow_message, end_pos) + c.error(unicode_lit_overflow_message, end_pos) } } idx++ @@ -202,11 +202,11 @@ fn (mut c Checker) int_lit(mut node ast.IntegerLiteral) ast.Type { lit := node.val.replace('_', '').all_after('-').to_upper() is_neg := node.val.starts_with('-') if lit.len > 2 && lit[0] == `0` && lit[1] in [`B`, `X`, `O`] { - if lohi := checker.iencoding_map[lit[1]] { + if lohi := iencoding_map[lit[1]] { c.check_num_literal(lohi, is_neg, lit[2..]) or { c.num_lit_overflow_error(node) } } } else { - lohi := 
checker.iencoding_map[`_`] + lohi := iencoding_map[`_`] c.check_num_literal(lohi, is_neg, lit) or { c.num_lit_overflow_error(node) } } return ast.int_literal_type diff --git a/vlib/v/doc/comment.v b/vlib/v/doc/comment.v index 5639befae39a30..dfb19d024d1a75 100644 --- a/vlib/v/doc/comment.v +++ b/vlib/v/doc/comment.v @@ -14,12 +14,12 @@ pub mut: // is_example returns true if the contents of this comment is an inline doc example. // The current convention is '// Example: ' pub fn (dc DocComment) is_example() bool { - return dc.text.trim_space().starts_with(doc.example_pattern) + return dc.text.trim_space().starts_with(example_pattern) } // example returns the content of the inline example body pub fn (dc DocComment) example() string { - return dc.text.all_after(doc.example_pattern) + return dc.text.all_after(example_pattern) } // is_multi_line_example returns true if an example line has no inline code diff --git a/vlib/v/doc/node.v b/vlib/v/doc/node.v index c3f6a5d0597a60..0dd1eec2940e54 100644 --- a/vlib/v/doc/node.v +++ b/vlib/v/doc/node.v @@ -15,7 +15,7 @@ pub fn (nodes []DocNode) find(symname string) !DocNode { // arrange sorts the DocNodes based on their symbols and names. pub fn (mut nodes []DocNode) arrange() { - if !doc.should_sort { + if !should_sort { return } mut kinds := []SymbolKind{} diff --git a/vlib/v/doc/utils.v b/vlib/v/doc/utils.v index b9a1ded7704516..997fb30065c26c 100644 --- a/vlib/v/doc/utils.v +++ b/vlib/v/doc/utils.v @@ -104,7 +104,7 @@ pub fn merge_doc_comments(comments []DocComment) string { // Use own paragraph for "highlight" comments. 
ll := l.to_lower() mut continue_line_loop := false - for key in doc.highlight_keys { + for key in highlight_keys { if ll.starts_with(key) { comment += '\n\n${key.title()}${l[key.len..]}' // Workaround for compiling with `v -cstrict -cc gcc vlib/v/doc/doc_test.v` @@ -117,7 +117,7 @@ pub fn merge_doc_comments(comments []DocComment) string { continue } line_no_spaces := l.replace(' ', '') - for ch in doc.horizontal_rule_chars { + for ch in horizontal_rule_chars { if line_no_spaces.starts_with(ch.repeat(3)) && line_no_spaces.count(ch) == line_no_spaces.len { comment += '\n' + l + '\n' diff --git a/vlib/v/fmt/fmt.v b/vlib/v/fmt/fmt.v index d83ee85973bde7..61c14da39687c2 100644 --- a/vlib/v/fmt/fmt.v +++ b/vlib/v/fmt/fmt.v @@ -167,7 +167,7 @@ pub fn (mut f Fmt) wrap_long_line(penalty_idx int, add_indent bool) bool { if f.buffering { return false } - if penalty_idx > 0 && f.line_len <= fmt.break_points[penalty_idx] { + if penalty_idx > 0 && f.line_len <= break_points[penalty_idx] { return false } if f.out.last() == ` ` { @@ -1782,7 +1782,7 @@ pub fn (mut f Fmt) sum_type_decl(node ast.SumTypeDecl) { for variant in variants { // 3 = length of ' = ' or ' | ' line_length += 3 + variant.name.len - if line_length > fmt.max_len || (variant.id != node.variants.len - 1 + if line_length > max_len || (variant.id != node.variants.len - 1 && node.variants[variant.id].end_comments.len > 0) { separator = '\n\t| ' is_multiline = true @@ -1889,7 +1889,7 @@ pub fn (mut f Fmt) array_init(node ast.ArrayInit) { if i == 0 { if f.array_init_depth > f.array_init_break.len { f.array_init_break << pos.line_nr > last_line_nr - || f.line_len + expr.pos().len > fmt.break_points[3] + || f.line_len + expr.pos().len > break_points[3] } } mut line_break := f.array_init_break[f.array_init_depth - 1] @@ -1911,7 +1911,7 @@ pub fn (mut f Fmt) array_init(node ast.ArrayInit) { single_line_expr := expr_is_single_line(expr) if single_line_expr { mut estr := '' - if !is_new_line && !f.buffering && f.line_len + 
expr.pos().len > fmt.max_len { + if !is_new_line && !f.buffering && f.line_len + expr.pos().len > max_len { if inc_indent { estr = f.node_str(expr) } @@ -2357,22 +2357,10 @@ pub fn (mut f Fmt) ident(node ast.Ident) { } } if !is_local && !node.name.contains('.') && !f.inside_const { - // Force usage of full path to const in the same module: - // `println(minute)` => `println(time.minute)` - // This makes it clear that a module const is being used - // (since V's consts are no longer ALL_CAP). - // ^^^ except for `main`, where consts are allowed to not have a `main.` prefix. if obj := f.file.global_scope.find('${f.cur_mod}.${node.name}') { if obj is ast.ConstField { - // "v.fmt.foo" => "fmt.foo" const_name := node.name.all_after_last('.') - if f.cur_mod == 'main' { - f.write(const_name) - } else { - short := '${f.cur_mod.all_after_last('.')}.${const_name}' - f.write(short) - f.mark_import_as_used(short) - } + f.write(const_name) if node.or_expr.kind == .block { f.or_expr(node.or_expr) } @@ -2471,7 +2459,7 @@ pub fn (mut f Fmt) if_expr(node ast.IfExpr) { } // When a single line if is really long, write it again as multiline, // except it is part of an InfixExpr. 
- if is_ternary && f.line_len > fmt.max_len && !f.buffering { + if is_ternary && f.line_len > max_len && !f.buffering { is_ternary = false f.single_line_if = false f.out.go_back_to(start_pos) @@ -2596,7 +2584,7 @@ pub fn (mut f Fmt) infix_expr(node ast.InfixExpr) { } if !buffering_save && f.buffering { f.buffering = false - if !f.single_line_if && f.line_len > fmt.max_len { + if !f.single_line_if && f.line_len > max_len { is_cond := node.op in [.and, .logical_or] f.wrap_infix(start_pos, start_len, is_cond) } @@ -2664,7 +2652,7 @@ fn (mut f Fmt) write_splitted_infix(conditions []string, penalties []int, ignore defer { f.wsinfix_depth-- } for i, cnd in conditions { c := cnd.trim_space() - if f.line_len + c.len < fmt.break_points[penalties[i]] { + if f.line_len + c.len < break_points[penalties[i]] { if (i > 0 && i < conditions.len) || (ignore_paren && i == 0 && c.len > 5 && c[3] == `(`) { f.write(' ') } @@ -2672,12 +2660,12 @@ fn (mut f Fmt) write_splitted_infix(conditions []string, penalties []int, ignore } else { is_paren_expr := (c[0] == `(` || (c.len > 5 && c[3] == `(`)) && c.ends_with(')') final_len := ((f.indent + 1) * 4) + c.len - if f.wsinfix_depth > fmt.wsinfix_depth_max { + if f.wsinfix_depth > wsinfix_depth_max { // limit indefinite recursion, by just giving up splitting: f.write(c) continue } - if final_len > fmt.max_len && is_paren_expr { + if final_len > max_len && is_paren_expr { conds, pens := split_up_infix(c, true, is_cond) f.write_splitted_infix(conds, pens, true, is_cond) continue @@ -2818,7 +2806,7 @@ fn (mut f Fmt) match_branch(branch ast.MatchBranch, single_line bool) { f.is_mbranch_expr = true for j, expr in branch.exprs { estr := f.node_str(expr).trim_space() - if f.line_len + estr.len + 2 > fmt.max_len { + if f.line_len + estr.len + 2 > max_len { f.remove_new_line() f.writeln('') } @@ -2913,7 +2901,7 @@ pub fn (mut f Fmt) or_expr(node ast.OrExpr) { // so, since this'll all be on one line, trim any possible whitespace str := 
f.node_str(node.stmts[0]).trim_space() single_line := ' or { ${str} }' - if single_line.len + f.line_len <= fmt.max_len { + if single_line.len + f.line_len <= max_len { f.write(single_line) return } @@ -3165,13 +3153,13 @@ pub fn (mut f Fmt) string_literal(node ast.StringLiteral) { if node.is_raw { f.write('${quote}${node.val}${quote}') } else { - unescaped_val := node.val.replace('${fmt.bs}${fmt.bs}', '\x01').replace_each([ - "${fmt.bs}'", + unescaped_val := node.val.replace('${bs}${bs}', '\x01').replace_each([ + "${bs}'", "'", - '${fmt.bs}"', + '${bs}"', '"', ]) - s := unescaped_val.replace_each(['\x01', '${fmt.bs}${fmt.bs}', quote, '${fmt.bs}${quote}']) + s := unescaped_val.replace_each(['\x01', '${bs}${bs}', quote, '${bs}${quote}']) f.write('${quote}${s}${quote}') } } @@ -3199,13 +3187,13 @@ pub fn (mut f Fmt) string_inter_literal(node ast.StringInterLiteral) { // work too different for the various exprs that are interpolated f.write(quote) for i, val in node.vals { - unescaped_val := val.replace('${fmt.bs}${fmt.bs}', '\x01').replace_each([ - "${fmt.bs}'", + unescaped_val := val.replace('${bs}${bs}', '\x01').replace_each([ + "${bs}'", "'", - '${fmt.bs}"', + '${bs}"', '"', ]) - s := unescaped_val.replace_each(['\x01', '${fmt.bs}${fmt.bs}', quote, '${fmt.bs}${quote}']) + s := unescaped_val.replace_each(['\x01', '${bs}${bs}', quote, '${bs}${quote}']) f.write('${s}') if i >= node.exprs.len { break diff --git a/vlib/v/fmt/tests/do_not_change_type_names_that_just_happen_to_have_the_module_as_a_substring_keep.vv b/vlib/v/fmt/tests/do_not_change_type_names_that_just_happen_to_have_the_module_as_a_substring_keep.vv index ea7a1054fd4e83..0694bee982d2a4 100644 --- a/vlib/v/fmt/tests/do_not_change_type_names_that_just_happen_to_have_the_module_as_a_substring_keep.vv +++ b/vlib/v/fmt/tests/do_not_change_type_names_that_just_happen_to_have_the_module_as_a_substring_keep.vv @@ -69,7 +69,7 @@ pub mut: // validate validates the header and returns its details if valid pub fn 
validate(data []u8, params DecompressParams) !GzipHeader { - if data.len < gzip.min_header_length { + if data.len < min_header_length { return error('data is too short, not gzip compressed?') } else if data[0] != 0x1f || data[1] != 0x8b { return error('wrong magic numbers, not gzip compressed?') @@ -82,7 +82,7 @@ pub fn validate(data []u8, params DecompressParams) !GzipHeader { // correctly, so we dont accidently decompress something that belongs // to the header - if data[3] & gzip.reserved_bits > 0 { + if data[3] & reserved_bits > 0 { // rfc 1952 2.3.1.2 Compliance // A compliant decompressor must give an error indication if any // reserved bit is non-zero, since such a bit could indicate the @@ -91,12 +91,12 @@ pub fn validate(data []u8, params DecompressParams) !GzipHeader { return error('reserved flags are set, unsupported field detected') } - if data[3] & gzip.fextra > 0 { + if data[3] & fextra > 0 { xlen := data[header.length] header.extra = data[header.length + 1..header.length + 1 + xlen] header.length += xlen + 1 } - if data[3] & gzip.fname > 0 { + if data[3] & fname > 0 { // filename is zero-terminated, so skip until we hit a zero byte for header.length < data.len && data[header.length] != 0x00 { header.filename << data[header.length] @@ -104,7 +104,7 @@ pub fn validate(data []u8, params DecompressParams) !GzipHeader { } header.length++ } - if data[3] & gzip.fcomment > 0 { + if data[3] & fcomment > 0 { // comment is zero-terminated, so skip until we hit a zero byte for header.length < data.len && data[header.length] != 0x00 { header.comment << data[header.length] @@ -112,7 +112,7 @@ pub fn validate(data []u8, params DecompressParams) !GzipHeader { } header.length++ } - if data[3] & gzip.fhcrc > 0 { + if data[3] & fhcrc > 0 { if header.length + 12 > data.len { return error('data too short') } diff --git a/vlib/v/fmt/tests/struct_decl_with_const_default_value_and_comments_keep.vv b/vlib/v/fmt/tests/struct_decl_with_const_default_value_and_comments_keep.vv 
index 5b38508ed2e471..bd0f2c43ff6903 100644 --- a/vlib/v/fmt/tests/struct_decl_with_const_default_value_and_comments_keep.vv +++ b/vlib/v/fmt/tests/struct_decl_with_const_default_value_and_comments_keep.vv @@ -10,7 +10,7 @@ pub mut: end_index i64 = -1 end_line u8 = `\n` - end_line_len int = foo.endline_cr_len // size of the endline rune \n = 1, \r\n = 2 + end_line_len int = endline_cr_len // size of the endline rune \n = 1, \r\n = 2 separator u8 = `,` // comma is the default separator separator_len int = 1 // size of the separator rune quote u8 = `"` // double quote is the standard quote char diff --git a/vlib/v/gen/c/assert.v b/vlib/v/gen/c/assert.v index d7de1f69c42a7e..beeda9202d6584 100644 --- a/vlib/v/gen/c/assert.v +++ b/vlib/v/gen/c/assert.v @@ -94,7 +94,7 @@ fn (mut g Gen) assert_subexpression_to_ctemp(expr ast.Expr, expr_type ast.Type) sym := g.table.final_sym(g.unwrap_generic(expr.expr.return_type)) if sym.kind == .struct_ { if (sym.info as ast.Struct).is_union { - return c.unsupported_ctemp_assert_transform + return unsupported_ctemp_assert_transform } } return g.new_ctemp_var_then_gen(expr, expr_type) @@ -102,7 +102,7 @@ fn (mut g Gen) assert_subexpression_to_ctemp(expr ast.Expr, expr_type ast.Type) } else {} } - return c.unsupported_ctemp_assert_transform + return unsupported_ctemp_assert_transform } fn (mut g Gen) gen_assert_postfailure_mode(node ast.AssertStmt) { diff --git a/vlib/v/gen/c/auto_str_methods.v b/vlib/v/gen/c/auto_str_methods.v index 21ec93528de964..2657bc40044165 100644 --- a/vlib/v/gen/c/auto_str_methods.v +++ b/vlib/v/gen/c/auto_str_methods.v @@ -292,8 +292,8 @@ fn (mut g Gen) gen_str_for_alias(info ast.Alias, styp string, str_fn_name string g.auto_str_funcs.writeln('\tstring tmp_ds = ${parent_str_fn_name}(${deref}it);') } g.auto_str_funcs.writeln('\tstring res = str_intp(3, _MOV((StrIntpData[]){ - {_SLIT0, ${c.si_s_code}, {.d_s = indents }}, - {_SLIT("${clean_type_v_type_name}("), ${c.si_s_code}, {.d_s = tmp_ds }}, + {_SLIT0, 
${si_s_code}, {.d_s = indents }}, + {_SLIT("${clean_type_v_type_name}("), ${si_s_code}, {.d_s = tmp_ds }}, {_SLIT(")"), 0, {.d_c = 0 }} }));') g.auto_str_funcs.writeln('\tstring_free(&indents);') @@ -414,7 +414,7 @@ fn (mut g Gen) gen_str_for_interface(info ast.Interface, styp string, typ_str st } val += ')' res := 'str_intp(2, _MOV((StrIntpData[]){ - {_SLIT("${clean_interface_v_type_name}(\'"), ${c.si_s_code}, {.d_s = ${val}}}, + {_SLIT("${clean_interface_v_type_name}(\'"), ${si_s_code}, {.d_s = ${val}}}, {_SLIT("\')"), 0, {.d_c = 0 }} }))' fn_builder.write_string('\tif (x._typ == _${styp}_${sub_sym.cname}_index)') @@ -426,7 +426,7 @@ fn (mut g Gen) gen_str_for_interface(info ast.Interface, styp string, typ_str st } val += ')' res := 'str_intp(2, _MOV((StrIntpData[]){ - {_SLIT("${clean_interface_v_type_name}("), ${c.si_s_code}, {.d_s = ${val}}}, + {_SLIT("${clean_interface_v_type_name}("), ${si_s_code}, {.d_s = ${val}}}, {_SLIT(")"), 0, {.d_c = 0 }} }))' fn_builder.write_string('\tif (x._typ == _${styp}_${sub_sym.cname}_index)') @@ -484,7 +484,7 @@ fn (mut g Gen) gen_str_for_union_sum_type(info ast.SumType, styp string, typ_str } val += ')' res := 'str_intp(2, _MOV((StrIntpData[]){ - {_SLIT("${clean_sum_type_v_type_name}(\'"), ${c.si_s_code}, {.d_s = ${val}}}, + {_SLIT("${clean_sum_type_v_type_name}(\'"), ${si_s_code}, {.d_s = ${val}}}, {_SLIT("\')"), 0, {.d_c = 0 }} }))' fn_builder.write_string('\t\tcase ${int(typ)}: return ${res};\n') @@ -496,7 +496,7 @@ fn (mut g Gen) gen_str_for_union_sum_type(info ast.SumType, styp string, typ_str } val += ')' res := 'str_intp(2, _MOV((StrIntpData[]){ - {_SLIT("${clean_sum_type_v_type_name}("), ${c.si_s_code}, {.d_s = ${val}}}, + {_SLIT("${clean_sum_type_v_type_name}("), ${si_s_code}, {.d_s = ${val}}}, {_SLIT(")"), 0, {.d_c = 0 }} }))' fn_builder.write_string('\t\tcase ${int(typ)}: return ${res};\n') @@ -638,15 +638,15 @@ fn (mut g Gen) gen_str_for_array(info ast.Array, styp string, str_fn_name string } } else if sym.kind == 
.rune { // Rune are managed at this level as strings - g.auto_str_funcs.writeln('\t\tstring x = str_intp(2, _MOV((StrIntpData[]){{_SLIT("\`"), ${c.si_s_code}, {.d_s = ${elem_str_fn_name}(it) }}, {_SLIT("\`"), 0, {.d_c = 0 }}}));\n') + g.auto_str_funcs.writeln('\t\tstring x = str_intp(2, _MOV((StrIntpData[]){{_SLIT("\`"), ${si_s_code}, {.d_s = ${elem_str_fn_name}(it) }}, {_SLIT("\`"), 0, {.d_c = 0 }}}));\n') } else if sym.kind == .string { if typ.has_flag(.option) { func := g.get_str_fn(typ) g.auto_str_funcs.writeln('\t\tstring x = ${func}(it);\n') } else if is_elem_ptr { - g.auto_str_funcs.writeln('\t\tstring x = str_intp(2, _MOV((StrIntpData[]){{_SLIT("&\'"), ${c.si_s_code}, {.d_s = *it }}, {_SLIT("\'"), 0, {.d_c = 0 }}}));\n') + g.auto_str_funcs.writeln('\t\tstring x = str_intp(2, _MOV((StrIntpData[]){{_SLIT("&\'"), ${si_s_code}, {.d_s = *it }}, {_SLIT("\'"), 0, {.d_c = 0 }}}));\n') } else { - g.auto_str_funcs.writeln('\t\tstring x = str_intp(2, _MOV((StrIntpData[]){{_SLIT("\'"), ${c.si_s_code}, {.d_s = it }}, {_SLIT("\'"), 0, {.d_c = 0 }}}));\n') + g.auto_str_funcs.writeln('\t\tstring x = str_intp(2, _MOV((StrIntpData[]){{_SLIT("\'"), ${si_s_code}, {.d_s = it }}, {_SLIT("\'"), 0, {.d_c = 0 }}}));\n') } } else { // There is a custom .str() method, so use it. 
@@ -1007,10 +1007,10 @@ fn (mut g Gen) gen_str_for_struct(info ast.Struct, lang ast.Language, styp strin if is_first { // first field doesn't need \n - fn_body.write_string('\t\t{_SLIT0, ${c.si_s_code}, {.d_s=indents}}, {_SLIT(" ${field.name}: ${ptr_amp}${prefix}"), 0, {.d_c=0}}, ') + fn_body.write_string('\t\t{_SLIT0, ${si_s_code}, {.d_s=indents}}, {_SLIT(" ${field.name}: ${ptr_amp}${prefix}"), 0, {.d_c=0}}, ') is_first = false } else { - fn_body.write_string('\t\t{_SLIT("\\n"), ${c.si_s_code}, {.d_s=indents}}, {_SLIT(" ${field.name}: ${ptr_amp}${prefix}"), 0, {.d_c=0}}, ') + fn_body.write_string('\t\t{_SLIT("\\n"), ${si_s_code}, {.d_s=indents}}, {_SLIT(" ${field.name}: ${ptr_amp}${prefix}"), 0, {.d_c=0}}, ') } // custom methods management @@ -1034,7 +1034,7 @@ fn (mut g Gen) gen_str_for_struct(info ast.Struct, lang ast.Language, styp strin } // with floats we use always the g representation: if is_opt_field { - fn_body.write_string('{_SLIT("${quote_str}"), ${c.si_s_code}, {.d_s=') + fn_body.write_string('{_SLIT("${quote_str}"), ${si_s_code}, {.d_s=') } else if sym.kind !in [.f32, .f64] { fn_body.write_string('{_SLIT("${quote_str}"), ${int(base_fmt)}, {.${data_str(base_fmt)}=') } else { @@ -1113,7 +1113,7 @@ fn (mut g Gen) gen_str_for_struct(info ast.Struct, lang ast.Language, styp strin fn_body.writeln('}}, {_SLIT("${quote_str}"), 0, {.d_c=0}},') } - fn_body.writeln('\t\t{_SLIT("\\n"), ${c.si_s_code}, {.d_s=indents}}, {_SLIT("}"), 0, {.d_c=0}},') + fn_body.writeln('\t\t{_SLIT("\\n"), ${si_s_code}, {.d_s=indents}}, {_SLIT("}"), 0, {.d_c=0}},') fn_body.writeln('\t}));') } diff --git a/vlib/v/gen/c/cgen.v b/vlib/v/gen/c/cgen.v index 8390801d9f1e5f..c066140bff0314 100644 --- a/vlib/v/gen/c/cgen.v +++ b/vlib/v/gen/c/cgen.v @@ -1171,9 +1171,9 @@ fn (mut g Gen) option_type_name(t ast.Type) (string, string) { base = 'anon_fn_${g.table.fn_type_signature(sym.info.func)}' } if sym.language == .c && sym.kind == .struct_ { - styp = '${c.option_name}_${base.replace(' ', '_')}' 
+ styp = '${option_name}_${base.replace(' ', '_')}' } else { - styp = '${c.option_name}_${base}' + styp = '${option_name}_${base}' } if t.has_flag(.generic) || t.is_ptr() { styp = styp.replace('*', '_ptr') @@ -1193,9 +1193,9 @@ fn (mut g Gen) result_type_name(t ast.Type) (string, string) { base = 'anon_fn_${g.table.fn_type_signature(sym.info.func)}' } if sym.language == .c && sym.kind == .struct_ { - styp = '${c.result_name}_${base.replace(' ', '_')}' + styp = '${result_name}_${base.replace(' ', '_')}' } else { - styp = '${c.result_name}_${base}' + styp = '${result_name}_${base}' } if t.has_flag(.generic) || t.is_ptr() { styp = styp.replace('*', '_ptr') @@ -1471,11 +1471,11 @@ fn (mut g Gen) write_chan_push_option_fns() { done << styp g.register_option(ast.void_type.set_flag(.option)) g.channel_definitions.writeln(' -static inline ${c.option_name}_void __Option_${styp}_pushval(${styp} ch, ${el_type} e) { +static inline ${option_name}_void __Option_${styp}_pushval(${styp} ch, ${el_type} e) { if (sync__Channel_try_push_priv(ch, &e, false)) { - return (${c.option_name}_void){ .state = 2, .err = _v_error(_SLIT("channel closed")), .data = {EMPTY_STRUCT_INITIALIZATION} }; + return (${option_name}_void){ .state = 2, .err = _v_error(_SLIT("channel closed")), .data = {EMPTY_STRUCT_INITIALIZATION} }; } - return (${c.option_name}_void){0}; + return (${option_name}_void){0}; }') } } @@ -1521,7 +1521,7 @@ fn (g &Gen) type_sidx(t ast.Type) string { pub fn (mut g Gen) write_typedef_types() { for sym in g.table.type_symbols { - if sym.name in c.builtins { + if sym.name in builtins { continue } match sym.kind { @@ -1602,7 +1602,7 @@ static inline void __${sym.cname}_pushval(${sym.cname} ch, ${push_arg} val) { } } for sym in g.table.type_symbols { - if sym.kind == .alias && sym.name !in c.builtins && sym.name !in ['byte', 'i32'] { + if sym.kind == .alias && sym.name !in builtins && sym.name !in ['byte', 'i32'] { g.write_alias_typesymbol_declaration(sym) } } @@ -1610,12 +1610,12 @@ 
static inline void __${sym.cname}_pushval(${sym.cname} ch, ${push_arg} val) { // Generating interfaces after all the common types have been defined // to prevent generating interface struct before definition of field types for sym in g.table.type_symbols { - if sym.kind == .interface_ && sym.name !in c.builtins { + if sym.kind == .interface_ && sym.name !in builtins { g.write_interface_typedef(sym) } } for sym in g.table.type_symbols { - if sym.kind == .interface_ && sym.name !in c.builtins { + if sym.kind == .interface_ && sym.name !in builtins { g.write_interface_typesymbol_declaration(sym) } } @@ -1920,7 +1920,7 @@ fn (mut g Gen) stmts_with_tmp_var(stmts []ast.Stmt, tmp_var string) bool { styp = g.base_type(ret_typ) g.write('_option_ok(&(${styp}[]) { ') g.expr_with_cast(stmt.expr, stmt.typ, ret_typ) - g.writeln(' }, (${c.option_name}*)(&${tmp_var}), sizeof(${styp}));') + g.writeln(' }, (${option_name}*)(&${tmp_var}), sizeof(${styp}));') } } } @@ -1952,7 +1952,7 @@ fn (mut g Gen) stmts_with_tmp_var(stmts []ast.Stmt, tmp_var string) bool { styp = g.base_type(ret_typ) g.write('_result_ok(&(${styp}[]) { ') g.expr_with_cast(stmt.expr, stmt.typ, ret_typ) - g.writeln(' }, (${c.result_name}*)(&${tmp_var}), sizeof(${styp}));') + g.writeln(' }, (${result_name}*)(&${tmp_var}), sizeof(${styp}));') } } } @@ -2099,10 +2099,10 @@ fn (mut g Gen) expr_with_tmp_var(expr ast.Expr, expr_typ ast.Type, ret_typ ast.T if simple_assign { g.writeln(';') } else { - g.writeln(' }, (${c.option_name}*)(&${tmp_var}), sizeof(${styp}));') + g.writeln(' }, (${option_name}*)(&${tmp_var}), sizeof(${styp}));') } } else { - g.writeln(' }, (${c.result_name}*)(&${tmp_var}), sizeof(${styp}));') + g.writeln(' }, (${result_name}*)(&${tmp_var}), sizeof(${styp}));') } g.set_current_pos_as_last_stmt_pos() } @@ -5157,8 +5157,8 @@ fn (mut g Gen) cast_expr(node ast.CastExpr) { g.writeln('${g.typ(parent_type)} ${tmp_var2};') g.write('_option_ok(&(${g.base_type(parent_type)}[]) { ') g.expr(node.expr) - 
g.writeln(' }, (${c.option_name}*)(&${tmp_var2}), sizeof(${g.base_type(parent_type)}));') - g.writeln('_option_ok(&(${g.typ(parent_type)}[]) { ${tmp_var2} }, (${c.option_name}*)&${tmp_var}, sizeof(${g.typ(parent_type)}));') + g.writeln(' }, (${option_name}*)(&${tmp_var2}), sizeof(${g.base_type(parent_type)}));') + g.writeln('_option_ok(&(${g.typ(parent_type)}[]) { ${tmp_var2} }, (${option_name}*)&${tmp_var}, sizeof(${g.typ(parent_type)}));') g.write(cur_stmt) g.write(tmp_var) } else if node.expr_type.has_flag(.option) { @@ -5532,7 +5532,7 @@ fn (mut g Gen) return_stmt(node ast.Return) { // handle promoting error/function returning result if fn_return_is_result { ftyp := g.typ(node.types[0]) - mut is_regular_result := ftyp == c.result_name + mut is_regular_result := ftyp == result_name if is_regular_result || node.types[0] == ast.error_type_idx { if g.fn_decl != unsafe { nil } && g.fn_decl.is_test { test_error_var := g.new_tmp_var() @@ -5642,11 +5642,11 @@ fn (mut g Gen) return_stmt(node ast.Return) { } g.write('}') if fn_return_is_option { - g.writeln(' }, (${c.option_name}*)(&${tmpvar}), sizeof(${styp}));') + g.writeln(' }, (${option_name}*)(&${tmpvar}), sizeof(${styp}));') g.write_defer_stmts_when_needed() g.write('return ${tmpvar}') } else if fn_return_is_result { - g.writeln(' }, (${c.result_name}*)(&${tmpvar}), sizeof(${styp}));') + g.writeln(' }, (${result_name}*)(&${tmpvar}), sizeof(${styp}));') g.write_defer_stmts_when_needed() g.write('return ${tmpvar}') } @@ -5679,7 +5679,7 @@ fn (mut g Gen) return_stmt(node ast.Return) { node.types[0].has_flag(.option) } } - if fn_return_is_option && !expr_type_is_opt && return_sym.name != c.option_name { + if fn_return_is_option && !expr_type_is_opt && return_sym.name != option_name { styp := g.base_type(fn_ret_type) g.writeln('${ret_typ} ${tmpvar};') g.write('_option_ok(&(${styp}[]) { ') @@ -5700,7 +5700,7 @@ fn (mut g Gen) return_stmt(node ast.Return) { g.write(', ') } } - g.writeln(' }, 
(${c.option_name}*)(&${tmpvar}), sizeof(${styp}));') + g.writeln(' }, (${option_name}*)(&${tmpvar}), sizeof(${styp}));') g.write_defer_stmts_when_needed() g.autofree_scope_vars(node.pos.pos - 1, node.pos.line_nr, true) g.writeln('return ${tmpvar};') @@ -5714,7 +5714,7 @@ fn (mut g Gen) return_stmt(node ast.Return) { node.types[0].has_flag(.result) } } - if fn_return_is_result && !expr_type_is_result && return_sym.name != c.result_name { + if fn_return_is_result && !expr_type_is_result && return_sym.name != result_name { styp := g.base_type(fn_ret_type) g.writeln('${ret_typ} ${tmpvar};') g.write('_result_ok(&(${styp}[]) { ') @@ -5737,7 +5737,7 @@ fn (mut g Gen) return_stmt(node ast.Return) { g.write(', ') } } - g.writeln(' }, (${c.result_name}*)(&${tmpvar}), sizeof(${styp}));') + g.writeln(' }, (${result_name}*)(&${tmpvar}), sizeof(${styp}));') g.write_defer_stmts_when_needed() g.autofree_scope_vars(node.pos.pos - 1, node.pos.line_nr, true) g.writeln('return ${tmpvar};') @@ -6633,7 +6633,7 @@ fn (mut g Gen) write_builtin_types() { mut builtin_types := []&ast.TypeSymbol{} // builtin types // builtin types need to be on top // everything except builtin will get sorted - for builtin_name in c.builtins { + for builtin_name in builtins { sym := g.table.sym_by_idx(g.table.type_idxs[builtin_name]) if sym.kind == .interface_ { g.write_interface_typedef(sym) @@ -6656,7 +6656,7 @@ fn (mut g Gen) write_sorted_types() { unsafe { mut symbols := []&ast.TypeSymbol{cap: g.table.type_symbols.len} // structs that need to be sorted for sym in g.table.type_symbols { - if sym.name !in c.builtins { + if sym.name !in builtins { symbols << sym } } @@ -6846,7 +6846,7 @@ fn (mut g Gen) write_sorted_fn_typesymbol_declaration() { } mut syms := []&ast.TypeSymbol{} // functions to be defined for sym in g.table.type_symbols { - if sym.kind == .function && sym.name !in c.builtins { + if sym.kind == .function && sym.name !in builtins { syms << sym } } @@ -7178,7 +7178,7 @@ fn (mut g Gen) 
or_block(var_name string, or_block ast.OrExpr, return_type ast.Ty styp := g.typ(g.fn_decl.return_type) err_obj := g.new_tmp_var() g.writeln('\t${styp} ${err_obj};') - g.writeln('\tmemcpy(&${err_obj}, &${cvar_name}, sizeof(${c.result_name}));') + g.writeln('\tmemcpy(&${err_obj}, &${cvar_name}, sizeof(${result_name}));') g.writeln('\treturn ${err_obj};') } } @@ -7219,7 +7219,7 @@ fn (mut g Gen) or_block(var_name string, or_block ast.OrExpr, return_type ast.Ty @[inline] fn c_name(name_ string) string { name := util.no_dots(name_) - if c.c_reserved_chk.matches(name) { + if c_reserved_chk.matches(name) { return '__v_${name}' } return name @@ -7228,7 +7228,7 @@ fn c_name(name_ string) string { @[inline] fn c_fn_name(name_ string) string { name := util.no_dots(name_) - if c.c_reserved_chk.matches(name) { + if c_reserved_chk.matches(name) { return '_v_${name}' } return name diff --git a/vlib/v/gen/c/cmain.v b/vlib/v/gen/c/cmain.v index 259e01338fe51c..3ad06cfedf92b0 100644 --- a/vlib/v/gen/c/cmain.v +++ b/vlib/v/gen/c/cmain.v @@ -44,7 +44,7 @@ fn (mut g Gen) gen_vlines_reset() { g.vlines_path = util.vlines_escape_path(g.pref.out_name_c, g.pref.ccompiler) g.writeln('') g.writeln('// Reset the C file/line numbers') - g.writeln('${c.reset_dbg_line} "${g.vlines_path}"') + g.writeln('${reset_dbg_line} "${g.vlines_path}"') g.writeln('') } } @@ -62,7 +62,7 @@ pub fn fix_reset_dbg_line(src string, out_file string) string { for idx, ob in src { if ob == `\n` { lines++ - if unsafe { vmemcmp(src.str + idx + 1, c.reset_dbg_line.str, c.reset_dbg_line.len) } == 0 { + if unsafe { vmemcmp(src.str + idx + 1, reset_dbg_line.str, reset_dbg_line.len) } == 0 { dbg_reset_line_idx = idx + 1 break } diff --git a/vlib/v/gen/c/fn.v b/vlib/v/gen/c/fn.v index a910ec8aaacd53..fbfaebc2419d31 100644 --- a/vlib/v/gen/c/fn.v +++ b/vlib/v/gen/c/fn.v @@ -407,7 +407,7 @@ fn (mut g Gen) gen_fn_decl(node &ast.FnDecl, skip bool) { } g.writeln(') {') if is_closure { - g.writeln('${cur_closure_ctx}* 
${c.closure_ctx} = __CLOSURE_GET_DATA();') + g.writeln('${cur_closure_ctx}* ${closure_ctx} = __CLOSURE_GET_DATA();') } for i, is_promoted in heap_promoted { if is_promoted { @@ -535,7 +535,7 @@ fn (mut g Gen) c_fn_name(node &ast.FnDecl) string { unwrapped_rec_typ := g.unwrap_generic(node.receiver.typ) name = g.cc_type(unwrapped_rec_typ, false) + '_' + name if g.table.sym(unwrapped_rec_typ).kind == .placeholder { - name = name.replace_each(c.c_fn_name_escape_seq) + name = name.replace_each(c_fn_name_escape_seq) } } if node.language == .c { @@ -601,11 +601,11 @@ fn (mut g Gen) gen_anon_fn(mut node ast.AnonFn) { if var_sym.info is ast.ArrayFixed { g.write('.${var.name} = {') for i in 0 .. var_sym.info.size { - g.write('${c.closure_ctx}->${var.name}[${i}],') + g.write('${closure_ctx}->${var.name}[${i}],') } g.writeln('},') } else { - g.writeln('.${var.name} = ${c.closure_ctx}->${var.name},') + g.writeln('.${var.name} = ${closure_ctx}->${var.name},') } } } @@ -730,7 +730,7 @@ fn (mut g Gen) fn_decl_params(params []ast.Param, scope &ast.Scope, is_variadic typ = g.table.sym(typ).array_info().elem_type.set_flag(.variadic) } param_type_sym := g.table.sym(typ) - mut param_type_name := g.typ(typ).replace_each(c.c_fn_name_escape_seq) + mut param_type_name := g.typ(typ).replace_each(c_fn_name_escape_seq) if param_type_sym.kind == .function && !typ.has_flag(.option) { info := param_type_sym.info as ast.FnType func := info.func @@ -2177,7 +2177,7 @@ fn (mut g Gen) fn_call(node ast.CallExpr) { mut is_ptr := false if i == 0 { if obj.is_inherited { - g.write(c.closure_ctx + '->' + node.name) + g.write(closure_ctx + '->' + node.name) } else { g.write(node.name) } @@ -2199,7 +2199,7 @@ fn (mut g Gen) fn_call(node ast.CallExpr) { } is_fn_var = true } else if obj.is_inherited { - g.write(c.closure_ctx + '->' + node.name) + g.write(closure_ctx + '->' + node.name) is_fn_var = true } } diff --git a/vlib/v/gen/c/live.v b/vlib/v/gen/c/live.v index 281f4f51550db6..a0478865ea16c4 100644 --- 
a/vlib/v/gen/c/live.v +++ b/vlib/v/gen/c/live.v @@ -43,12 +43,12 @@ fn (mut g Gen) generate_hotcode_reloader_code() { for so_fn in g.hotcode_fn_names { load_code << 'impl_live_${so_fn} = dlsym(live_lib, "impl_live_${so_fn}");' } - phd = c.posix_hotcode_definitions_1 + phd = posix_hotcode_definitions_1 } else { for so_fn in g.hotcode_fn_names { load_code << 'impl_live_${so_fn} = (void *)GetProcAddress(live_lib, "impl_live_${so_fn}"); ' } - phd = c.windows_hotcode_definitions_1 + phd = windows_hotcode_definitions_1 } g.hotcode_definitions.writeln(phd.replace('@LOAD_FNS@', load_code.join('\n'))) } diff --git a/vlib/v/gen/c/reflection.v b/vlib/v/gen/c/reflection.v index 258304bfc20165..21df0f4e38d488 100644 --- a/vlib/v/gen/c/reflection.v +++ b/vlib/v/gen/c/reflection.v @@ -19,7 +19,7 @@ fn (mut g Gen) reflection_string(str string) int { @[inline] fn (mut g Gen) gen_reflection_strings() { for str, idx in g.reflection_strings { - g.writeln('\t${c.cprefix}add_string(_SLIT("${str}"), ${idx});') + g.writeln('\t${cprefix}add_string(_SLIT("${str}"), ${idx});') } } @@ -45,7 +45,7 @@ fn (g Gen) gen_functionarg_array(type_name string, node ast.Fn) string { // gen_functionarg_array generates the code for functionarg argument @[inline] fn (mut g Gen) gen_function_array(nodes []ast.Fn) string { - type_name := '${c.cprefix}Function' + type_name := '${cprefix}Function' if nodes.len == 0 { return g.gen_empty_array(type_name) @@ -61,11 +61,11 @@ fn (mut g Gen) gen_function_array(nodes []ast.Fn) string { // gen_reflection_fn generates C code for Function struct @[inline] fn (mut g Gen) gen_reflection_fn(node ast.Fn) string { - mut arg_str := '((${c.cprefix}Function){' + mut arg_str := '((${cprefix}Function){' v_name := node.name.all_after_last('.') arg_str += '.mod_name=_SLIT("${node.mod}"),' arg_str += '.name=_SLIT("${v_name}"),' - arg_str += '.args=${g.gen_functionarg_array(c.cprefix + 'FunctionArg', node)},' + arg_str += '.args=${g.gen_functionarg_array(cprefix + 'FunctionArg', 
node)},' arg_str += '.file_idx=${g.reflection_string(util.cescaped_path(node.file))},' arg_str += '.line_start=${node.pos.line_nr},' arg_str += '.line_end=${node.pos.last_line},' @@ -87,7 +87,7 @@ fn (mut g Gen) gen_reflection_sym(tsym ast.TypeSymbol) string { } info := g.gen_reflection_sym_info(tsym) methods := g.gen_function_array(tsym.methods) - return '(${c.cprefix}TypeSymbol){.name=_SLIT("${tsym.name}"),.idx=${tsym.idx},.parent_idx=${tsym.parent_idx},.language=${c.cprefix}VLanguage__${tsym.language},.kind=${c.cprefix}VKind__${kind_name},.info=${info},.methods=${methods}}' + return '(${cprefix}TypeSymbol){.name=_SLIT("${tsym.name}"),.idx=${tsym.idx},.parent_idx=${tsym.parent_idx},.language=${cprefix}VLanguage__${tsym.language},.kind=${cprefix}VKind__${kind_name},.info=${info},.methods=${methods}}' } // gen_attrs_array generates C code for []Attr @@ -107,11 +107,11 @@ fn (g Gen) gen_attrs_array(attrs []ast.Attr) string { @[inline] fn (g Gen) gen_fields_array(fields []ast.StructField) string { if fields.len == 0 { - return g.gen_empty_array('${c.cprefix}StructField') + return g.gen_empty_array('${cprefix}StructField') } - mut out := 'new_array_from_c_array(${fields.len},${fields.len},sizeof(${c.cprefix}StructField),' - out += '_MOV((${c.cprefix}StructField[${fields.len}]){' - out += fields.map('((${c.cprefix}StructField){.name=_SLIT("${it.name}"),.typ=${int(it.typ)},.attrs=${g.gen_attrs_array(it.attrs)},.is_pub=${it.is_pub},.is_mut=${it.is_mut}})').join(',') + mut out := 'new_array_from_c_array(${fields.len},${fields.len},sizeof(${cprefix}StructField),' + out += '_MOV((${cprefix}StructField[${fields.len}]){' + out += fields.map('((${cprefix}StructField){.name=_SLIT("${it.name}"),.typ=${int(it.typ)},.attrs=${g.gen_attrs_array(it.attrs)},.is_pub=${it.is_pub},.is_mut=${it.is_mut}})').join(',') out += '}))' return out } @@ -141,63 +141,63 @@ fn (mut g Gen) gen_reflection_sym_info(tsym ast.TypeSymbol) string { match tsym.kind { .array { info := tsym.info as ast.Array 
- s := 'ADDR(${c.cprefix}Array,(((${c.cprefix}Array){.nr_dims=${info.nr_dims},.elem_type=${int(info.elem_type)}})))' - return '(${c.cprefix}TypeInfo){._${c.cprefix}Array = memdup(${s},sizeof(${c.cprefix}Array)),._typ=${g.table.find_type_idx('v.reflection.Array')}}' + s := 'ADDR(${cprefix}Array,(((${cprefix}Array){.nr_dims=${info.nr_dims},.elem_type=${int(info.elem_type)}})))' + return '(${cprefix}TypeInfo){._${cprefix}Array = memdup(${s},sizeof(${cprefix}Array)),._typ=${g.table.find_type_idx('v.reflection.Array')}}' } .array_fixed { info := tsym.info as ast.ArrayFixed - s := 'ADDR(${c.cprefix}ArrayFixed,(((${c.cprefix}ArrayFixed){.size=${info.size},.elem_type=${int(info.elem_type)}})))' - return '(${c.cprefix}TypeInfo){._${c.cprefix}ArrayFixed=memdup(${s},sizeof(${c.cprefix}ArrayFixed)),._typ=${g.table.find_type_idx('v.reflection.ArrayFixed')}}' + s := 'ADDR(${cprefix}ArrayFixed,(((${cprefix}ArrayFixed){.size=${info.size},.elem_type=${int(info.elem_type)}})))' + return '(${cprefix}TypeInfo){._${cprefix}ArrayFixed=memdup(${s},sizeof(${cprefix}ArrayFixed)),._typ=${g.table.find_type_idx('v.reflection.ArrayFixed')}}' } .map { info := tsym.info as ast.Map - s := 'ADDR(${c.cprefix}Map,(((${c.cprefix}Map){.key_type=${int(info.key_type)},.value_type=${int(info.value_type)}})))' - return '(${c.cprefix}TypeInfo){._${c.cprefix}Map=memdup(${s},sizeof(${c.cprefix}Map)),._typ=${g.table.find_type_idx('v.reflection.Map')}}' + s := 'ADDR(${cprefix}Map,(((${cprefix}Map){.key_type=${int(info.key_type)},.value_type=${int(info.value_type)}})))' + return '(${cprefix}TypeInfo){._${cprefix}Map=memdup(${s},sizeof(${cprefix}Map)),._typ=${g.table.find_type_idx('v.reflection.Map')}}' } .sum_type { info := tsym.info as ast.SumType - s := 'ADDR(${c.cprefix}SumType,(((${c.cprefix}SumType){.parent_idx=${info.parent_type.idx()},.variants=${g.gen_type_array(info.variants)}})))' - return 
'(${c.cprefix}TypeInfo){._${c.cprefix}SumType=memdup(${s},sizeof(${c.cprefix}SumType)),._typ=${g.table.find_type_idx('v.reflection.SumType')}}' + s := 'ADDR(${cprefix}SumType,(((${cprefix}SumType){.parent_idx=${info.parent_type.idx()},.variants=${g.gen_type_array(info.variants)}})))' + return '(${cprefix}TypeInfo){._${cprefix}SumType=memdup(${s},sizeof(${cprefix}SumType)),._typ=${g.table.find_type_idx('v.reflection.SumType')}}' } .struct_ { info := tsym.info as ast.Struct attrs := g.gen_attrs_array(info.attrs) fields := g.gen_fields_array(info.fields) - s := 'ADDR(${c.cprefix}Struct,(((${c.cprefix}Struct){.parent_idx=${(tsym.info as ast.Struct).parent_type.idx()},.attrs=${attrs},.fields=${fields}})))' - return '(${c.cprefix}TypeInfo){._${c.cprefix}Struct=memdup(${s},sizeof(${c.cprefix}Struct)),._typ=${g.table.find_type_idx('v.reflection.Struct')}}' + s := 'ADDR(${cprefix}Struct,(((${cprefix}Struct){.parent_idx=${(tsym.info as ast.Struct).parent_type.idx()},.attrs=${attrs},.fields=${fields}})))' + return '(${cprefix}TypeInfo){._${cprefix}Struct=memdup(${s},sizeof(${cprefix}Struct)),._typ=${g.table.find_type_idx('v.reflection.Struct')}}' } .enum_ { info := tsym.info as ast.Enum vals := g.gen_string_array(info.vals) - s := 'ADDR(${c.cprefix}Enum,(((${c.cprefix}Enum){.vals=${vals},.is_flag=${info.is_flag}})))' - return '(${c.cprefix}TypeInfo){._${c.cprefix}Enum=memdup(${s},sizeof(${c.cprefix}Enum)),._typ=${g.table.find_type_idx('v.reflection.Enum')}}' + s := 'ADDR(${cprefix}Enum,(((${cprefix}Enum){.vals=${vals},.is_flag=${info.is_flag}})))' + return '(${cprefix}TypeInfo){._${cprefix}Enum=memdup(${s},sizeof(${cprefix}Enum)),._typ=${g.table.find_type_idx('v.reflection.Enum')}}' } .function { info := tsym.info as ast.FnType - s := 'ADDR(${c.cprefix}Function,${g.gen_reflection_fn(info.func)})' - return '(${c.cprefix}TypeInfo){._${c.cprefix}Function=memdup(${s},sizeof(${c.cprefix}Function)),._typ=${g.table.find_type_idx('v.reflection.Function')}}' + s := 
'ADDR(${cprefix}Function,${g.gen_reflection_fn(info.func)})' + return '(${cprefix}TypeInfo){._${cprefix}Function=memdup(${s},sizeof(${cprefix}Function)),._typ=${g.table.find_type_idx('v.reflection.Function')}}' } .interface_ { name := tsym.name.all_after_last('.') info := tsym.info as ast.Interface methods := g.gen_function_array(info.methods) fields := g.gen_fields_array(info.fields) - s := 'ADDR(${c.cprefix}Interface,(((${c.cprefix}Interface){.name=_SLIT("${name}"),.methods=${methods},.fields=${fields},.is_generic=${info.is_generic}})))' - return '(${c.cprefix}TypeInfo){._${c.cprefix}Interface=memdup(${s},sizeof(${c.cprefix}Interface)),._typ=${g.table.find_type_idx('v.reflection.Interface')}}' + s := 'ADDR(${cprefix}Interface,(((${cprefix}Interface){.name=_SLIT("${name}"),.methods=${methods},.fields=${fields},.is_generic=${info.is_generic}})))' + return '(${cprefix}TypeInfo){._${cprefix}Interface=memdup(${s},sizeof(${cprefix}Interface)),._typ=${g.table.find_type_idx('v.reflection.Interface')}}' } .alias { info := tsym.info as ast.Alias - s := 'ADDR(${c.cprefix}Alias,(((${c.cprefix}Alias){.parent_idx=${info.parent_type.idx()},.language=${c.cprefix}VLanguage__${info.language.str()}})))' - return '(${c.cprefix}TypeInfo){._${c.cprefix}Alias=memdup(${s},sizeof(${c.cprefix}Alias)),._typ=${g.table.find_type_idx('v.reflection.Alias')}}' + s := 'ADDR(${cprefix}Alias,(((${cprefix}Alias){.parent_idx=${info.parent_type.idx()},.language=${cprefix}VLanguage__${info.language.str()}})))' + return '(${cprefix}TypeInfo){._${cprefix}Alias=memdup(${s},sizeof(${cprefix}Alias)),._typ=${g.table.find_type_idx('v.reflection.Alias')}}' } .multi_return { info := tsym.info as ast.MultiReturn - s := 'ADDR(${c.cprefix}MultiReturn,(((${c.cprefix}MultiReturn){.types=${g.gen_type_array(info.types)}})))' - return '(${c.cprefix}TypeInfo){._${c.cprefix}MultiReturn=memdup(${s},sizeof(${c.cprefix}MultiReturn)),._typ=${g.table.find_type_idx('v.reflection.MultiReturn')}}' + s := 
'ADDR(${cprefix}MultiReturn,(((${cprefix}MultiReturn){.types=${g.gen_type_array(info.types)}})))' + return '(${cprefix}TypeInfo){._${cprefix}MultiReturn=memdup(${s},sizeof(${cprefix}MultiReturn)),._typ=${g.table.find_type_idx('v.reflection.MultiReturn')}}' } else { - s := 'ADDR(${c.cprefix}None,(((${c.cprefix}None){.parent_idx=${tsym.parent_idx},})))' - return '(${c.cprefix}TypeInfo){._${c.cprefix}None=memdup(${s},sizeof(${c.cprefix}None)),._typ=${g.table.find_type_idx('v.reflection.None')}}' + s := 'ADDR(${cprefix}None,(((${cprefix}None){.parent_idx=${tsym.parent_idx},})))' + return '(${cprefix}TypeInfo){._${cprefix}None=memdup(${s},sizeof(${cprefix}None)),._typ=${g.table.find_type_idx('v.reflection.None')}}' } } } @@ -206,19 +206,19 @@ fn (mut g Gen) gen_reflection_sym_info(tsym ast.TypeSymbol) string { fn (mut g Gen) gen_reflection_data() { // modules declaration for mod_name in g.table.modules { - g.writeln('\t${c.cprefix}add_module(_SLIT("${mod_name}"));') + g.writeln('\t${cprefix}add_module(_SLIT("${mod_name}"));') } // type symbols declaration for _, tsym in g.table.type_symbols { sym := g.gen_reflection_sym(tsym) - g.writeln('\t${c.cprefix}add_type_symbol(${sym});') + g.writeln('\t${cprefix}add_type_symbol(${sym});') } // types declaration for full_name, idx in g.table.type_idxs { name := full_name.all_after_last('.') - g.writeln('\t${c.cprefix}add_type((${c.cprefix}Type){.name=_SLIT("${name}"),.idx=${idx}});') + g.writeln('\t${cprefix}add_type((${cprefix}Type){.name=_SLIT("${name}"),.idx=${idx}});') } // func declaration (methods come from struct methods) @@ -227,7 +227,7 @@ fn (mut g Gen) gen_reflection_data() { continue } func := g.gen_reflection_fn(fn_) - g.writeln('\t${c.cprefix}add_func(${func});') + g.writeln('\t${cprefix}add_func(${func});') } g.gen_reflection_strings() diff --git a/vlib/v/gen/c/struct.v b/vlib/v/gen/c/struct.v index b5da913481cb1d..9af7d8ec00b241 100644 --- a/vlib/v/gen/c/struct.v +++ b/vlib/v/gen/c/struct.v @@ -32,7 +32,7 @@ fn 
(mut g Gen) struct_init(node ast.StructInit) { g.typ(node.typ) } mut shared_styp := '' // only needed for shared x := St{... - if styp in c.skip_struct_init { + if styp in skip_struct_init { // needed for c++ compilers g.go_back(3) return diff --git a/vlib/v/gen/c/testdata/freestanding_define/a_d_freestanding.c.v b/vlib/v/gen/c/testdata/freestanding_define/a_d_freestanding.c.v index d2b837c351eb5d..8a5ea1929fa997 100644 --- a/vlib/v/gen/c/testdata/freestanding_define/a_d_freestanding.c.v +++ b/vlib/v/gen/c/testdata/freestanding_define/a_d_freestanding.c.v @@ -4,5 +4,5 @@ const f = $embed_file(@FILE).len pub fn hi() { println('hi from a_d_freestanding.c.v') - println('f.len: ${freestanding_define.f}') + println('f.len: ${f}') } diff --git a/vlib/v/gen/c/testdata/freestanding_define/a_notd_freestanding.c.v b/vlib/v/gen/c/testdata/freestanding_define/a_notd_freestanding.c.v index 014ded57c678f2..1e175ffa07cbba 100644 --- a/vlib/v/gen/c/testdata/freestanding_define/a_notd_freestanding.c.v +++ b/vlib/v/gen/c/testdata/freestanding_define/a_notd_freestanding.c.v @@ -4,5 +4,5 @@ const f = $embed_file(@FILE).len pub fn hi() { println('hi from a_notd_freestanding.c.v') - println('f.len: ${freestanding_define.f}') + println('f.len: ${f}') } diff --git a/vlib/v/gen/golang/golang.v b/vlib/v/gen/golang/golang.v index 43db695f204ae8..5ba9ae36fafec4 100644 --- a/vlib/v/gen/golang/golang.v +++ b/vlib/v/gen/golang/golang.v @@ -2229,13 +2229,13 @@ pub fn (mut f Gen) string_literal(node ast.StringLiteral) { if node.is_raw { f.write('`${node.val}`') } else { - unescaped_val := node.val.replace('${golang.bs}${golang.bs}', '\x01').replace_each([ - "${golang.bs}'", + unescaped_val := node.val.replace('${bs}${bs}', '\x01').replace_each([ + "${bs}'", "'", - '${golang.bs}"', + '${bs}"', '"', ]) - s := unescaped_val.replace_each(['\x01', '${golang.bs}${golang.bs}', '"', '${golang.bs}"']) + s := unescaped_val.replace_each(['\x01', '${bs}${bs}', '"', '${bs}"']) f.write('"${s}"') } } @@ -2247,7 
+2247,7 @@ pub fn (mut f Gen) string_inter_literal(node ast.StringInterLiteral) { // work too different for the various exprs that are interpolated f.write(quote) for i, val in node.vals { - f.write(val.replace("${golang.bs}'", "'")) + f.write(val.replace("${bs}'", "'")) if i >= node.exprs.len { break } diff --git a/vlib/v/gen/js/fn.v b/vlib/v/gen/js/fn.v index 30b52768d85b79..838d234b9de79e 100644 --- a/vlib/v/gen/js/fn.v +++ b/vlib/v/gen/js/fn.v @@ -407,7 +407,7 @@ fn (mut g JsGen) gen_call_expr(it ast.CallExpr) { mut name := g.js_name(it.name) is_print := name in ['print', 'println', 'eprint', 'eprintln', 'panic'] - if name in js.builtin_functions { + if name in builtin_functions { name = 'builtin__${name}' } print_method := name @@ -624,7 +624,7 @@ fn (mut g JsGen) gen_method_decl(it ast.FnDecl, typ FnGenType) { name = g.js_name(name) name = g.generic_fn_name(g.cur_concrete_types, name) - if name in js.builtin_functions { + if name in builtin_functions { name = 'builtin__${name}' } if it.is_pub && !it.is_method { @@ -802,7 +802,7 @@ fn (mut g JsGen) gen_anon_fn(mut fun ast.AnonFn) { name = g.js_name(name) name = g.generic_fn_name(g.table.cur_concrete_types, name) - if name in js.builtin_functions { + if name in builtin_functions { name = 'builtin__${name}' } if it.is_pub && !it.is_method { diff --git a/vlib/v/gen/js/js.v b/vlib/v/gen/js/js.v index d8bac5a10e19a5..8ebeccaf19acab 100644 --- a/vlib/v/gen/js/js.v +++ b/vlib/v/gen/js/js.v @@ -562,7 +562,7 @@ fn (mut g JsGen) js_name(name_ string) string { return name } name = name_.replace('.', '__') - if name in js.js_reserved { + if name in js_reserved { return '_v_${name}' } return name @@ -1095,7 +1095,7 @@ fn (mut g JsGen) assert_subexpression_to_ctemp(expr ast.Expr, expr_type ast.Type sym := g.table.final_sym(g.unwrap_generic(expr.expr.return_type)) if sym.kind == .struct_ { if (sym.info as ast.Struct).is_union { - return js.unsupported_ctemp_assert_transform + return unsupported_ctemp_assert_transform } } 
return g.new_ctemp_var_then_gen(expr, expr_type) @@ -1103,7 +1103,7 @@ fn (mut g JsGen) assert_subexpression_to_ctemp(expr ast.Expr, expr_type ast.Type } else {} } - return js.unsupported_ctemp_assert_transform + return unsupported_ctemp_assert_transform } fn (mut g JsGen) new_ctemp_var(expr ast.Expr, expr_type ast.Type) ast.CTempVar { @@ -1406,7 +1406,7 @@ fn (mut g JsGen) gen_assign_stmt(stmt ast.AssignStmt, semicolon bool) { // TODO: Multiple types?? should_cast := stmt.left_types.len != 0 - && g.table.type_kind(stmt.left_types.first()) in js.shallow_equatables + && g.table.type_kind(stmt.left_types.first()) in shallow_equatables && (g.cast_stack.len <= 0 || stmt.left_types.first() != g.cast_stack.last()) if should_cast { g.cast_stack << stmt.left_types.first() @@ -1826,7 +1826,7 @@ fn (mut g JsGen) gen_return_stmt(it ast.Return) { if fn_return_is_option { option_none := node.exprs[0] is ast.None ftyp := g.typ(node.types[0]) - mut is_regular_option := ftyp == js.option_name + mut is_regular_option := ftyp == option_name if option_none || is_regular_option || node.types[0] == ast.error_type_idx { if !isnil(g.fn_decl) && g.fn_decl.is_test { test_error_var := g.new_tmp_var() @@ -1851,7 +1851,7 @@ fn (mut g JsGen) gen_return_stmt(it ast.Return) { tmp := g.new_tmp_var() g.write('const ${tmp} = new ') - g.writeln('${js.option_name}({});') + g.writeln('${option_name}({});') g.write('${tmp}.state = new u8(0);') g.write('${tmp}.data = ') if it.exprs.len == 1 { @@ -1898,7 +1898,7 @@ fn (mut g JsGen) gen_struct_decl(node ast.StructDecl) { if name.starts_with('JS.') { return } - if name in js.v_types && g.ns.name == 'builtin' { + if name in v_types && g.ns.name == 'builtin' { return } js_name := g.js_name(name) @@ -2106,7 +2106,7 @@ fn (mut g JsGen) gen_array_init_expr(it ast.ArrayInit) { } else { styp := g.typ(it.elem_type) - c := if styp in js.v_types { + c := if styp in v_types { g.gen_array_init_values_prim(it.exprs, styp) } else { g.gen_array_init_values(it.exprs) @@ 
-2991,7 +2991,7 @@ fn (mut g JsGen) gen_infix_expr(it ast.InfixExpr) { right := g.unwrap(node.right_type) has_operator_overloading := g.table.has_method(left.sym, '==') if has_operator_overloading - || (l_sym.kind in js.shallow_equatables && r_sym.kind in js.shallow_equatables) { + || (l_sym.kind in shallow_equatables && r_sym.kind in shallow_equatables) { if node.op == .ne { g.write('!') } diff --git a/vlib/v/gen/js/sourcemap/source_map.v b/vlib/v/gen/js/sourcemap/source_map.v index 4cfcbf5bb751b7..8533c4cc5d4cf5 100644 --- a/vlib/v/gen/js/sourcemap/source_map.v +++ b/vlib/v/gen/js/sourcemap/source_map.v @@ -27,7 +27,7 @@ pub mut: pub fn new_sourcemap(file string, source_root string, sources_content_inline bool) SourceMap { return SourceMap{ - version: sourcemap.source_map_version + version: source_map_version file: file source_root: source_root mappings: new_mappings() diff --git a/vlib/v/gen/js/sourcemap/vlq/vlq.v b/vlib/v/gen/js/sourcemap/vlq/vlq.v index 0831297ddf3ea7..b1d2c4aaf09c52 100644 --- a/vlib/v/gen/js/sourcemap/vlq/vlq.v +++ b/vlib/v/gen/js/sourcemap/vlq/vlq.v @@ -33,10 +33,10 @@ fn abs64(x i64) u64 { @[inline] fn decode64(input u8) u8 { $if debug { - assert input >= vlq.enc_char_special_plus - assert input <= vlq.enc_char_end_zl + assert input >= enc_char_special_plus + assert input <= enc_char_end_zl } - return u8(vlq.enc_index[input - vlq.enc_char_special_plus]) + return u8(enc_index[input - enc_char_special_plus]) } // Decode a single VLQ value from the input stream, returning the value. 
@@ -62,16 +62,16 @@ pub fn decode(mut input io.Reader) !i64 { return error('no content') } digit = decode64(buf[0]) - keep_going = (digit & vlq.continued) != 0 + keep_going = (digit & continued) != 0 - digit_value := u64(digit & vlq.mask) << u32(shifter) // TODO: check Overflow + digit_value := u64(digit & mask) << u32(shifter) // TODO: check Overflow accum += digit_value - shifter += vlq.shift + shifter += shift } abs_value := accum / 2 - if abs_value > vlq.max_i64 { + if abs_value > max_i64 { return error('Overflow') } @@ -84,7 +84,7 @@ fn encode64(input u8) u8 { $if debug { assert input < 64 } - return vlq.enc_table[input] + return enc_table[input] } // Encode a value as Base64 VLQ, sending it to the writer @@ -94,15 +94,15 @@ pub fn encode(value i64, mut output io.Writer) ! { if signed { if value_u64 == 0 { // Wrapped - value_u64 = vlq.max_i64 + 1 + value_u64 = max_i64 + 1 } value_u64 |= 1 } for { - mut digit := u8(value_u64) & vlq.mask - value_u64 >>= vlq.shift + mut digit := u8(value_u64) & mask + value_u64 >>= shift if value_u64 > 0 { - digit |= vlq.continued + digit |= continued } bytes := [encode64(digit)] output.write(bytes) or { return error('Write failed') } diff --git a/vlib/v/gen/js/tests/hello/hello.v b/vlib/v/gen/js/tests/hello/hello.v index 304c46d2180b3a..e724c0e857dcaf 100644 --- a/vlib/v/gen/js/tests/hello/hello.v +++ b/vlib/v/gen/js/tests/hello/hello.v @@ -23,7 +23,7 @@ pub enum Ccc { pub fn debugger() string { v := Bbb{} - return hello.hello + return hello } pub fn excited() string { diff --git a/vlib/v/gen/native/amd64.v b/vlib/v/gen/native/amd64.v index cf3a417a5c518c..d9d5774e90cf86 100644 --- a/vlib/v/gen/native/amd64.v +++ b/vlib/v/gen/native/amd64.v @@ -103,10 +103,10 @@ const amd64_cpuregs = ['eax', 'ecx', 'edx', 'ebx', 'esp', 'ebp', 'esi', 'edi'] fn amd64_get_call_regs(os pref.OS) []Amd64Register { return match os { .windows { - native.amd64_windows_call_regs + amd64_windows_call_regs } else { - native.amd64_system_v_call_regs + 
amd64_system_v_call_regs } } } @@ -114,10 +114,10 @@ fn amd64_get_call_regs(os pref.OS) []Amd64Register { fn amd64_get_call_sseregs(os pref.OS) []Amd64SSERegister { return match os { .windows { - native.amd64_windows_call_sseregs + amd64_windows_call_sseregs } else { - native.amd64_system_v_call_sseregs + amd64_system_v_call_sseregs } } } @@ -2946,7 +2946,7 @@ fn (mut c Amd64) gen_asm_stmt(asm_node ast.AsmStmt) { match a { ast.AsmRegister { regname = a.name - reg = i32(native.amd64_cpuregs.index(regname)) + reg = i32(amd64_cpuregs.index(regname)) line += a.typ.str() } ast.IntegerLiteral { diff --git a/vlib/v/gen/native/blacklist.v b/vlib/v/gen/native/blacklist.v index ccd5243c21cd2e..3ce367005de16e 100644 --- a/vlib/v/gen/native/blacklist.v +++ b/vlib/v/gen/native/blacklist.v @@ -33,5 +33,5 @@ const whitelist = { } fn (g Gen) is_blacklisted(name string, is_builtin bool) bool { - return native.whitelist[name] or { is_builtin } + return whitelist[name] or { is_builtin } } diff --git a/vlib/v/gen/native/dos.v b/vlib/v/gen/native/dos.v index 87373bbfe18d90..16f85c89650a7b 100644 --- a/vlib/v/gen/native/dos.v +++ b/vlib/v/gen/native/dos.v @@ -92,7 +92,7 @@ pub fn (mut g Gen) gen_dos_header() { g.println('; ' + dos_header_description[i]) } } - if g.pos() != native.dos_header_size { + if g.pos() != dos_header_size { g.n_error('Invalid dos header size') } diff --git a/vlib/v/gen/native/elf.v b/vlib/v/gen/native/elf.v index 2250a1203d8fa1..7cf7ee4d1d6591 100644 --- a/vlib/v/gen/native/elf.v +++ b/vlib/v/gen/native/elf.v @@ -151,24 +151,24 @@ mut: fn (mut g Gen) default_elf_header() ElfHeader { machine := if g.pref.arch == .arm64 { - native.elf_arm64 + elf_arm64 } else { - native.elf_amd64 + elf_amd64 } return ElfHeader{ - ident_class: native.elf_class64 - ident_data: native.elf_data_le - ident_version: native.elf_version - ident_osabi: native.elf_osabi_none - ident_abiversion: native.elf_abiversion - typ: native.elf_type_none + ident_class: elf_class64 + ident_data: 
elf_data_le + ident_version: elf_version + ident_osabi: elf_osabi_none + ident_abiversion: elf_abiversion + typ: elf_type_none machine: i16(machine) - version: native.elf_version - phoff: native.elf_header_size - ehsize: native.elf_header_size - phentsize: native.elf_phentry_size - shentsize: native.elf_shentry_size + version: elf_version + phoff: elf_header_size + ehsize: elf_header_size + phentsize: elf_phentry_size + shentsize: elf_shentry_size } } @@ -250,13 +250,13 @@ fn (mut g Gen) gen_program_header(p ProgramHeader) { g.write64(p.offset) // p_offset g.println('; p_offset') g.write64(if p.vaddr == 0 { - native.segment_start + segment_start } else { p.vaddr }) // p_vaddr g.println('; p_vaddr') g.write64(if p.paddr == 0 { - native.segment_start + segment_start } else { p.paddr }) // p_paddr @@ -469,8 +469,8 @@ fn (mut g Gen) create_shstrtab(mut sections []Section) { names[sections.len] = '.shstrtab' - mut shstrtab := g.create_section(names[sections.len], native.elf_sht_strtab, 0, 0, - 1, 0, g.create_string_table_section(names)) + mut shstrtab := g.create_section(names[sections.len], elf_sht_strtab, 0, 0, 1, 0, + g.create_string_table_section(names)) shstrtab.header.name = offset sections << shstrtab @@ -486,31 +486,30 @@ fn (mut g Gen) create_symtab(mut sections []Section, mut table []SymbolTableSect entry.name = offset - if (entry.info >> 4) == native.elf_stb_local { + if (entry.info >> 4) == elf_stb_local { local_symbols++ } offset += i32(entry.str_name.len + 1) } - sections << g.create_section('.strtab', native.elf_sht_strtab, 0, 0, 1, 0, g.create_string_table_section(names)) + sections << g.create_section('.strtab', elf_sht_strtab, 0, 0, 1, 0, g.create_string_table_section(names)) sections << // index of .strtab - g.create_section('.symtab', native.elf_sht_symtab, i32(sections.len - 1), local_symbols, - native.elf_sh_symtab_align, native.elf_sh_symtab_entsize, table) + g.create_section('.symtab', elf_sht_symtab, i32(sections.len - 1), local_symbols, + 
elf_sh_symtab_align, elf_sh_symtab_entsize, table) } fn (mut g Gen) create_relocation(name string, mut sections []Section, table []RelASection) Section { - mut section := g.create_section(name, native.elf_sht_rela, g.find_section_header('.symtab', + mut section := g.create_section(name, elf_sht_rela, g.find_section_header('.symtab', sections), 1, 8, 24, table) - section.header.flags = i64(native.elf_shf_info_link) + section.header.flags = i64(elf_shf_info_link) sections << section return section } fn (mut g Gen) create_progbits(name string, flags u64, data []u8) Section { - mut section := g.create_section(name, native.elf_sht_progbits, 0, 0, 1, data.len, - ProgBitsSection{data}) + mut section := g.create_section(name, elf_sht_progbits, 0, 0, 1, data.len, ProgBitsSection{data}) section.header.flags = i64(flags) return section } @@ -576,7 +575,7 @@ fn (mut g Gen) gen_symtab_data(section Section, data []SymbolTableSection) { g.println('; SHT_SYMTAB ${symbol.str_name}') } - size := native.elf_symtab_size * data.len + size := elf_symtab_size * data.len g.write64_at(section.header.pos + 32, i64(size)) } @@ -621,7 +620,7 @@ fn (mut g Gen) gen_section_data(sections []Section) { g.println('; SHT_RELA `${rela.name}` (${rela.offset}, ${rela.info}, ${rela.addend})') } - size := native.elf_rela_size * data.len + size := elf_rela_size * data.len g.write64_at(section.header.pos + 32, i64(size)) } HashSection { @@ -635,7 +634,7 @@ fn (mut g Gen) gen_section_data(sections []Section) { g.println('; SHT_DYNAMIC (${dyn.tag}, ${dyn.un})') } - size := native.elf_dynamic_size * data.len + size := elf_dynamic_size * data.len g.write64_at(section.header.pos + 32, i64(size)) } NoteSection { @@ -664,7 +663,7 @@ fn (mut g Gen) gen_section_data(sections []Section) { g.write64(data.info) g.println('; SHT_REL (${data.offset}, ${data.info})') - size := native.elf_rel_size + size := elf_rel_size g.write64_at(section.header.pos + 32, i64(size)) } ShLibSection { @@ -690,30 +689,30 @@ pub fn (mut g 
Gen) symtab_get_index(symbols []SymbolTableSection, name string) i } pub fn (mut g Gen) generate_linkable_elf_header() { - elf_type := native.elf_type_rel // PIE (use _exec for non-relocatable executables) + elf_type := elf_type_rel // PIE (use _exec for non-relocatable executables) // generate program headers mut program_headers := []ProgramHeader{} - program_headers << g.create_program_header(native.elf_pt_load, 5, native.elf_p_align) + program_headers << g.create_program_header(elf_pt_load, 5, elf_p_align) // generate sections mut sections := [ Section{}, // null section as first section - g.create_progbits('.text', native.elf_shf_alloc | native.elf_shf_execinstr, []), - g.create_progbits('.data', native.elf_shf_write | native.elf_shf_alloc, []), - g.create_progbits('.bss', native.elf_shf_write | native.elf_shf_alloc, []), + g.create_progbits('.text', elf_shf_alloc | elf_shf_execinstr, []), + g.create_progbits('.data', elf_shf_write | elf_shf_alloc, []), + g.create_progbits('.bss', elf_shf_write | elf_shf_alloc, []), ] g.symbol_table = [ SymbolTableSection{}, // first is null - g.create_symbol_table_section('main', native.elf_stt_notype, native.elf_stb_global, - native.elf_stv_default, 0, 0, i16(g.find_section_header('.text', sections))), // main label points to entry point address - g.create_symbol_table_section('_GLOBAL_OFFSET_TABLE_', native.elf_stt_notype, - native.elf_stb_global, native.elf_stv_default, 0, 0, 0), + g.create_symbol_table_section('main', elf_stt_notype, elf_stb_global, elf_stv_default, + 0, 0, i16(g.find_section_header('.text', sections))), // main label points to entry point address + g.create_symbol_table_section('_GLOBAL_OFFSET_TABLE_', elf_stt_notype, elf_stb_global, + elf_stv_default, 0, 0, 0), ] for symbol in g.extern_symbols { - g.symbol_table << g.create_symbol_table_section(symbol[2..], native.elf_stt_notype, - native.elf_stb_global, native.elf_stv_default, 0, 0, 0) + g.symbol_table << g.create_symbol_table_section(symbol[2..], 
elf_stt_notype, elf_stb_global, + elf_stv_default, 0, 0, 0) } g.create_symtab(mut sections, mut g.symbol_table) // create the .symtab section g.create_relocation('.rela.text', mut sections, []) @@ -722,7 +721,7 @@ pub fn (mut g Gen) generate_linkable_elf_header() { mut elf_header := g.default_elf_header() elf_header.typ = i16(elf_type) - elf_header.shoff = native.elf_header_size + native.elf_phentry_size * program_headers.len + elf_header.shoff = elf_header_size + elf_phentry_size * program_headers.len elf_header.phnum = i16(program_headers.len) elf_header.shnum = i16(sections.len) elf_header.shstrndx = i16(g.find_section_header('.shstrtab', sections)) @@ -757,7 +756,7 @@ pub fn (mut g Gen) generate_linkable_elf_header() { g.elf_text_header_addr = text_section.header.offset g.write64_at(g.elf_text_header_addr + 24, g.pos()) // write the code start pos to the text section - g.code_gen.call(native.placeholder) + g.code_gen.call(placeholder) g.println('; call main.main') g.code_gen.mov64(g.code_gen.main_reg(), 0) g.code_gen.ret() @@ -767,11 +766,11 @@ pub fn (mut g Gen) generate_linkable_elf_header() { } pub fn (mut g Gen) generate_simple_elf_header() { - elf_type := native.elf_type_exec + elf_type := elf_type_exec - mut phdr := g.create_program_header(native.elf_pt_load, 5, native.elf_p_align) - phdr.vaddr = native.segment_start - phdr.paddr = native.segment_start + mut phdr := g.create_program_header(elf_pt_load, 5, elf_p_align) + phdr.vaddr = segment_start + phdr.paddr = segment_start mut elf_header := g.default_elf_header() elf_header.typ = i16(elf_type) @@ -780,7 +779,7 @@ pub fn (mut g Gen) generate_simple_elf_header() { elf_header.shentsize = i16(0) elf_header.shnum = i16(0) elf_header.shstrndx = i16(0) - elf_header.entry = native.segment_start + native.elf_header_size + native.elf_phentry_size + elf_header.entry = segment_start + elf_header_size + elf_phentry_size g.gen_elf_header(elf_header) @@ -795,7 +794,7 @@ pub fn (mut g Gen) generate_simple_elf_header() 
{ g.code_start_pos = g.pos() g.debug_pos = i32(g.pos()) - g.code_gen.call(native.placeholder) + g.code_gen.call(placeholder) g.println('; call main.main') // generate exit syscall @@ -845,8 +844,7 @@ pub fn (mut g Gen) gen_rela_section() { mut relocations := []RelASection{} for call_pos, symbol in g.extern_fn_calls { relocations << g.create_rela_section(symbol, call_pos - g.code_start_pos + 2, - g.symtab_get_index(g.symbol_table, symbol[2..]), native.elf_r_amd64_gotpcrelx, - -4) + g.symtab_get_index(g.symbol_table, symbol[2..]), elf_r_amd64_gotpcrelx, -4) } g.elf_rela_section.data = relocations g.gen_section_data([g.elf_rela_section]) diff --git a/vlib/v/gen/native/gen.v b/vlib/v/gen/native/gen.v index c50ee0a7119d3f..1f89611524d828 100644 --- a/vlib/v/gen/native/gen.v +++ b/vlib/v/gen/native/gen.v @@ -1219,9 +1219,9 @@ pub fn escape_string(s string) string { mut out := []u8{cap: s.len} for c in s { - if c in native.escape_codes { - out << native.escape_char - out << native.escape_codes[c] + if c in escape_codes { + out << escape_char + out << escape_codes[c] } else { out << c } diff --git a/vlib/v/gen/native/macho.v b/vlib/v/gen/native/macho.v index 72836e36e8b34f..f2f74390195c43 100644 --- a/vlib/v/gen/native/macho.v +++ b/vlib/v/gen/native/macho.v @@ -42,13 +42,13 @@ struct Reloc { } fn (mut g Gen) macho_segment64_pagezero() { - g.macho_add_loadcommand(native.lc_segment_64, 72) + g.macho_add_loadcommand(lc_segment_64, 72) g.write_string_with_padding('__PAGEZERO', 16) // section name g.write64(0) // vmaddr if g.pref.arch == .amd64 { g.write64(i64(g.get_pagesize())) // vmsize } else { - g.write64(native.base_addr) // vmsize + g.write64(base_addr) // vmsize } g.write64(0) // fileoff g.write64(0) // filesize @@ -72,17 +72,17 @@ fn (mut g Gen) macho_patch_header() { // probably unnecessary fn (mut g Gen) macho_chained_fixups() { - g.macho_add_loadcommand(native.lc_dyld_chained_fixups, 16) + g.macho_add_loadcommand(lc_dyld_chained_fixups, 16) g.write32(0x4000) // 
dataoff g.write32(56) // datasize - g.macho_add_loadcommand(native.lc_dyld_exports_trie, 16) + g.macho_add_loadcommand(lc_dyld_exports_trie, 16) g.write32(0x4000) // dataoff g.write32(56) // datasize } fn (mut g Gen) macho_segment64_linkedit() { - g.macho_add_loadcommand(native.lc_segment_64, 0x48) + g.macho_add_loadcommand(lc_segment_64, 0x48) g.write_string_with_padding('__LINKEDIT', 16) if g.pref.arch == .amd64 { @@ -112,7 +112,7 @@ fn (mut g Gen) macho_header(ncmds i32, bintype i32) i32 { g.write32(0x01000007) // CPU_TYPE_X64 g.write32(0x80000003) // CPU_SUBTYPE_X64 } - g.write32(native.mh_execute) // filetype + g.write32(mh_execute) // filetype g.write32(ncmds) // ncmds cmdsize_offset := i32(g.buf.len) @@ -129,9 +129,9 @@ fn (mut g Gen) macho_header(ncmds i32, bintype i32) i32 { fn (mut g Gen) macho_segment64_text() []i32 { mut patch := []i32{} - g.macho_add_loadcommand(native.lc_segment_64, 152) + g.macho_add_loadcommand(lc_segment_64, 152) g.write_string_with_padding('__TEXT', 16) // section name - g.write64(native.base_addr) // vmaddr + g.write64(base_addr) // vmaddr g.write64(i64(g.get_pagesize()) * 2) // vmsize g.write64(0) // fileoff @@ -145,12 +145,12 @@ fn (mut g Gen) macho_segment64_text() []i32 { g.write_string_with_padding('__text', 16) // section name g.write_string_with_padding('__TEXT', 16) // segment name if g.pref.arch == .arm64 { - g.write64(native.base_addr + i64(g.get_pagesize())) // vmaddr + g.write64(base_addr + i64(g.get_pagesize())) // vmaddr g.write64(0) // vmsize g.write32(0) // offset g.write32(4) // align } else { - g.write64(native.base_addr + i64(g.get_pagesize())) // vmaddr + g.write64(base_addr + i64(g.get_pagesize())) // vmaddr patch << i32(g.buf.len) g.write64(0) // vmsize g.write32(g.get_pagesize()) // offset @@ -174,7 +174,7 @@ fn (mut g Gen) macho_segment64_text() []i32 { fn (mut g Gen) macho_symtab() { if g.pref.arch == .arm64 { - g.macho_add_loadcommand(native.lc_dyld_info_only, 48) + 
g.macho_add_loadcommand(lc_dyld_info_only, 48) g.write32(0) // rebase_off g.write32(0) // rebase_size g.write32(0) // bind_off @@ -187,13 +187,13 @@ fn (mut g Gen) macho_symtab() { g.write32(56) // export_size } - g.macho_add_loadcommand(native.lc_symtab, 24) + g.macho_add_loadcommand(lc_symtab, 24) g.write32(0x1000) // symoff g.write32(0) // nsyms g.write32(0x1000) // stroff g.write32(0) // strsize - g.macho_add_loadcommand(native.lc_dysymtab, 0x50) + g.macho_add_loadcommand(lc_dysymtab, 0x50) g.write32(0) // ilocalsym g.write32(0) // nlocalsym g.write32(0) // iextdefsym @@ -215,12 +215,12 @@ fn (mut g Gen) macho_symtab() { } fn (mut g Gen) macho_dylibs() { - g.macho_add_loadcommand(native.lc_load_dylinker, 32) + g.macho_add_loadcommand(lc_load_dylinker, 32) g.write32(12) // offset g.write_string_with_padding('/usr/lib/dyld', 16) g.write32(0) // padding // can be removed - g.macho_add_loadcommand(native.lc_load_dylib, 56) + g.macho_add_loadcommand(lc_load_dylib, 56) g.write32(24) // offset g.write32(0) // ts // g.write32(0x051f6403) // g.write32(1) // current version @@ -230,7 +230,7 @@ fn (mut g Gen) macho_dylibs() { } fn (mut g Gen) macho_main(addr i32) { - g.macho_add_loadcommand(native.lc_main, 24) + g.macho_add_loadcommand(lc_main, 24) g.write32(addr) // entrypoint g.write32(0) // initial_stacksize } @@ -240,7 +240,7 @@ pub fn (mut g Gen) generate_macho_header() { g.code_start_pos = i64(pagesize) g.debug_pos = pagesize ncmds := i32(0) // 9+ 2 -2 -3 -1 - cmdsize_offset := g.macho_header(ncmds, native.mh_execute) + cmdsize_offset := g.macho_header(ncmds, mh_execute) g.macho_segment64_pagezero() g.size_pos = g.macho_segment64_text() @@ -281,7 +281,7 @@ pub fn (mut g Gen) generate_macho_object_header() { g.write32(0x01000007) // CPU_TYPE_X64 g.write32(3) // CPU_SUBTYPE_X64 } - g.write32(native.mh_object) // MH_OBJECT + g.write32(mh_object) // MH_OBJECT text_offset := i32(0x138) g.write32(4) // # of load commands g.write32(text_offset - 0x20) // size of load 
commands // 0x138-0x20 @@ -313,7 +313,7 @@ pub fn (mut g Gen) generate_macho_object_header() { } g.write32(0x160) // relocation offset g.write32(0x1) // # of relocations - g.write32(i32(native.s_attr_some_instructions | native.s_attr_pure_instructions)) + g.write32(i32(s_attr_some_instructions | s_attr_pure_instructions)) g.write32(0) g.write32(0) g.write32(0) @@ -327,7 +327,7 @@ pub fn (mut g Gen) generate_macho_object_header() { // lc_symtab g.sym_table_command() - g.macho_add_loadcommand(native.lc_dysymtab, native.macho_d_size) + g.macho_add_loadcommand(lc_dysymtab, macho_d_size) g.write32(0) g.write32(2) g.write32(2) @@ -420,7 +420,7 @@ fn (mut g Gen) sym_table_command() { name: 'ltmp1' is_ext: false } - g.macho_add_loadcommand(native.lc_symtab, native.macho_symcmd_size) + g.macho_add_loadcommand(lc_symtab, macho_symcmd_size) sym_table_offset := i32(0x168) g.write32(sym_table_offset) g_syms_len := i32(4) diff --git a/vlib/v/gen/native/pe.v b/vlib/v/gen/native/pe.v index 5efbf0eddb6e6e..7d4e4aa2fa293f 100644 --- a/vlib/v/gen/native/pe.v +++ b/vlib/v/gen/native/pe.v @@ -149,7 +149,7 @@ pub fn (mut g Gen) gen_pe_header() { 0, // symbol table 0, // number of symbols 0, - native.pe_opt_hdr_size, // 40 // size of optional header + pe_opt_hdr_size, // 40 // size of optional header i32(PeCharacteristics.executable_image), ] @@ -166,7 +166,7 @@ pub fn (mut g Gen) gen_pe_header() { 0x16: '; mSizeOfOptionalHeader' 0x18: '; mCharacteristics' } - assert native.pe_coff_hdr_size == pe_header.len * 2 + assert pe_coff_hdr_size == pe_header.len * 2 } g.pe_coff_hdr_pos = g.pos() @@ -182,7 +182,7 @@ pub fn (mut g Gen) gen_pe_header() { // optional here comes here p_opthdr := g.pos() // should be 0x98 - if p_opthdr != native.optdr_location { + if p_opthdr != optdr_location { g.n_error('Invalid optdr location ${p_opthdr} != 0x98') } @@ -235,21 +235,21 @@ struct Pe32PlusOptionalHeader { fn (mut g Gen) get_pe32_plus_optional_header() Pe32PlusOptionalHeader { return 
Pe32PlusOptionalHeader{ magic: .pe32plus - major_linker_version: native.pe_major_linker_version - minor_linker_version: native.pe_minor_linker_version - image_base: native.image_base - section_alignment: native.pe_section_align - file_alignment: native.pe_file_align - major_os_version: native.pe_major_os_version - minor_os_version: native.pe_minor_os_version - major_subsystem_version: native.pe_major_subsystem_version - minor_subsystem_version: native.pe_minor_subsystem_version - size_of_headers: native.pe_header_size + major_linker_version: pe_major_linker_version + minor_linker_version: pe_minor_linker_version + image_base: image_base + section_alignment: pe_section_align + file_alignment: pe_file_align + major_os_version: pe_major_os_version + minor_os_version: pe_minor_os_version + major_subsystem_version: pe_major_subsystem_version + minor_subsystem_version: pe_minor_subsystem_version + size_of_headers: pe_header_size subsystem: .windows_cui - size_of_stack_reserve: native.pe_stack_size - size_of_stack_commit: native.pe_stack_size - size_of_heap_reserve: native.pe_heap_size - number_of_rva_and_sizes: native.pe_num_data_dirs + size_of_stack_reserve: pe_stack_size + size_of_stack_commit: pe_stack_size + size_of_heap_reserve: pe_heap_size + number_of_rva_and_sizes: pe_num_data_dirs } } @@ -406,13 +406,13 @@ const pe_default_data_dirs = [ ] fn get_pe_data_dirs() PeDataDirs { - assert native.pe_data_dir_names.len == native.pe_num_data_dirs - assert native.pe_default_data_dirs.len <= native.pe_num_data_dirs + assert pe_data_dir_names.len == pe_num_data_dirs + assert pe_default_data_dirs.len <= pe_num_data_dirs mut dd := PeDataDirs{} - for i in 0 .. native.pe_num_data_dirs { - dd.dirs[i] = native.pe_default_data_dirs[i] or { PeDataDir{} } - dd.debugnames[i] = native.pe_data_dir_names[i] + for i in 0 .. 
pe_num_data_dirs { + dd.dirs[i] = pe_default_data_dirs[i] or { PeDataDir{} } + dd.debugnames[i] = pe_data_dir_names[i] } return dd } @@ -477,14 +477,14 @@ fn (mut s PeSection) set_size_of_raw_data(mut g Gen, size i32) { } fn (mut s PeSection) set_virtual_address(mut g Gen, addr i32) { - aligned := (addr + native.pe_section_align - 1) & ~(native.pe_section_align - 1) + aligned := (addr + pe_section_align - 1) & ~(pe_section_align - 1) s.header.virtual_address = aligned g.write32_at(s.header_pos + pe_section_header_offsetof(.virtual_address), aligned) } fn (mut s PeSection) set_virtual_size(mut g Gen, size i32) { - aligned := (size + native.pe_section_align - 1) & ~(native.pe_section_align - 1) + aligned := (size + pe_section_align - 1) & ~(pe_section_align - 1) s.header.virtual_size = aligned g.write32_at(s.header_pos + pe_section_header_offsetof(.virtual_size), aligned) @@ -580,10 +580,10 @@ fn (mut g Gen) gen_pe_idt(idt &PeImportDirectoryTable, dll_name string) { fields := [idt.import_lookup_table_rva, idt.time_date_stamp, idt.forwarder_chain, idt.name_rva, idt.import_address_table_rva] - assert fields.len == native.pe_idt_field_description.len + assert fields.len == pe_idt_field_description.len for i, field in fields { g.write32(field) - g.println('; ' + native.pe_idt_field_description[i]) + g.println('; ' + pe_idt_field_description[i]) } g.println('^^^ Import Directory Table (${dll_name})') } @@ -602,7 +602,7 @@ fn (mut g Gen) gen_pe_idata() { } mut idata_section := &mut g.pe_sections[idata_section_index] - g.align_to(native.pe_file_align) + g.align_to(pe_file_align) g.println('; padding to 0x${g.pos().hex()}') idata_pos := g.pos() @@ -709,17 +709,17 @@ pub fn (mut g Gen) generate_pe_header() { g.pe_sections = [ g.create_pe_section('.idata', virtual_address: 0x1000 - characteristics: native.pe_scn_cnt_initialized_data | native.pe_scn_mem_read | native.pe_scn_mem_write + characteristics: pe_scn_cnt_initialized_data | pe_scn_mem_read | pe_scn_mem_write ), 
g.create_pe_section('.text', - characteristics: native.pe_scn_cnt_initialized_data | native.pe_scn_cnt_code | native.pe_scn_mem_execute | native.pe_scn_mem_read + characteristics: pe_scn_cnt_initialized_data | pe_scn_cnt_code | pe_scn_mem_execute | pe_scn_mem_read ), ] g.gen_pe_sections() g.gen_pe_idata() - g.align_to(native.pe_file_align) + g.align_to(pe_file_align) g.println('') g.println('^^^ padding to addr 0x${g.pos().hex()}') @@ -779,7 +779,7 @@ fn (mut g Gen) patch_pe_code_size() { fn (mut g Gen) patch_pe_image_size() { last_section := g.pe_sections.last() image_size := (last_section.header.virtual_address + last_section.header.virtual_size + - native.pe_section_align - 1) & ~(native.pe_section_align - 1) + pe_section_align - 1) & ~(pe_section_align - 1) g.write32_at(g.pe_opt_hdr_pos + pe32_plus_optional_header_offsetof(.size_of_image), image_size) } @@ -787,7 +787,7 @@ fn (mut g Gen) patch_pe_image_size() { pub fn (mut g Gen) generate_pe_footer() { g.sym_string_table() - g.align_to(native.pe_file_align) + g.align_to(pe_file_align) g.file_size_pos = g.pos() if g.pe_opt_hdr_pos == 0 { diff --git a/vlib/v/gen/native/readdll.c.v b/vlib/v/gen/native/readdll.c.v index 8c7736357dab05..11dcbe47f6c42d 100644 --- a/vlib/v/gen/native/readdll.c.v +++ b/vlib/v/gen/native/readdll.c.v @@ -100,12 +100,12 @@ fn get_dllexports(mut file os.File) !map[string]bool { if optional_header.magic != u16(PeMagic.pe32plus) { return error('wrong magic bytes: `${optional_header.magic.hex()}`, want: `${u16(PeMagic.pe32plus).hex()}`') } - if optional_header.number_of_rva_and_sizes <= native.pe_export_data_dir_index { + if optional_header.number_of_rva_and_sizes <= pe_export_data_dir_index { return map[string]bool{} // no exports in this file } sec_hdroffset = opt_hdroffset + u32(pe_opt_hdr_size) - read_pe_data_dir(mut file, opt_hdroffset + pe32_plus_opt_hdr_size, native.pe_export_data_dir_index)! 
+ read_pe_data_dir(mut file, opt_hdroffset + pe32_plus_opt_hdr_size, pe_export_data_dir_index)! } u16(PeMachine.i386) { return error('32-bit (i386) dlls not supported yet') @@ -138,8 +138,8 @@ fn parse_export_section(mut file os.File, export_data_dir PeDataDir, section_hea mut name_ptr := export_directory.name_ptr_rva - ref mut buf := []u8{} for _ in 0 .. export_directory.number_of_name_ptrs { - ptr := binary.little_endian_u32(file.read_bytes_at(native.pe_dword_size, name_ptr)) - name_ptr += native.pe_dword_size + ptr := binary.little_endian_u32(file.read_bytes_at(pe_dword_size, name_ptr)) + name_ptr += pe_dword_size mut j := u32(0) buf.clear() @@ -241,9 +241,9 @@ struct PeExportDirectoryRead { } fn read_pe_export_directory(mut file os.File, offset u64) !PeExportDirectoryRead { - buf := file.read_bytes_at(native.pe_export_directory_size, offset) - if buf.len != native.pe_export_directory_size { - return error('error reading export directory (${native.pe_export_directory_size} bytes)') + buf := file.read_bytes_at(pe_export_directory_size, offset) + if buf.len != pe_export_directory_size { + return error('error reading export directory (${pe_export_directory_size} bytes)') } return PeExportDirectoryRead{ diff --git a/vlib/v/help/help.v b/vlib/v/help/help.v index 5929b809b09862..519b707740fba4 100644 --- a/vlib/v/help/help.v +++ b/vlib/v/help/help.v @@ -39,7 +39,7 @@ pub fn print_and_exit(topic string, opts ExitOptions) { exit(fail_code) } } - if topic in help.cli_topics { + if topic in cli_topics { vexe := get_vexe() os.system('${os.quoted_path(vexe)} ${topic} --help') exit(opts.exit_code) diff --git a/vlib/v/parser/comptime.v b/vlib/v/parser/comptime.v index b4a7671c60942a..6e8661c6debc48 100644 --- a/vlib/v/parser/comptime.v +++ b/vlib/v/parser/comptime.v @@ -16,8 +16,8 @@ fn (mut p Parser) parse_comptime_type() ast.ComptimeType { pos := p.tok.pos() p.check(.dollar) name := p.check_name() - if name !in parser.comptime_types { - p.error('unsupported compile-time 
type `${name}`: only ${parser.comptime_types} are supported') + if name !in comptime_types { + p.error('unsupported compile-time type `${name}`: only ${comptime_types} are supported') } mut kind := ast.ComptimeTypeKind.unknown kind = match name { @@ -132,7 +132,7 @@ fn (mut p Parser) comptime_call() ast.ComptimeCall { p.check(.dot) } method_name := p.check_name() - if method_name !in parser.supported_comptime_calls { + if method_name !in supported_comptime_calls { p.error(error_msg) return err_node } diff --git a/vlib/v/parser/expr.v b/vlib/v/parser/expr.v index f515654ae8c571..31965a130ab4a0 100644 --- a/vlib/v/parser/expr.v +++ b/vlib/v/parser/expr.v @@ -10,8 +10,8 @@ const max_expr_level = 100 @[inline] fn (mut p Parser) check_expr_level() ! { - if p.expr_level > parser.max_expr_level { - return error('expr level > ${parser.max_expr_level}') + if p.expr_level > max_expr_level { + return error('expr level > ${max_expr_level}') } } diff --git a/vlib/v/parser/parse_type.v b/vlib/v/parser/parse_type.v index 850313a241e2b7..e2a1c8eab15448 100644 --- a/vlib/v/parser/parse_type.v +++ b/vlib/v/parser/parse_type.v @@ -416,9 +416,9 @@ fn (mut p Parser) parse_inline_sum_type() ast.Type { } variants := p.parse_sum_type_variants() if variants.len > 1 { - if variants.len > parser.maximum_inline_sum_type_variants { + if variants.len > maximum_inline_sum_type_variants { pos := variants[0].pos.extend(variants.last().pos) - p.warn_with_pos('an inline sum type expects a maximum of ${parser.maximum_inline_sum_type_variants} types (${variants.len} were given)', + p.warn_with_pos('an inline sum type expects a maximum of ${maximum_inline_sum_type_variants} types (${variants.len} were given)', pos) } mut variant_names := []string{} @@ -870,7 +870,7 @@ fn (mut p Parser) parse_generic_inst_type(name string) ast.Type { defer { p.generic_type_level-- } - if p.generic_type_level > parser.generic_type_level_cutoff_limit { + if p.generic_type_level > generic_type_level_cutoff_limit { 
p.error('too many levels of Parser.parse_generic_inst_type() calls: ${p.generic_type_level}, probably due to too many layers embedded generic type') return ast.void_type } diff --git a/vlib/v/parser/parser.v b/vlib/v/parser/parser.v index 18754c77b770b9..1a831e2b66cebf 100644 --- a/vlib/v/parser/parser.v +++ b/vlib/v/parser/parser.v @@ -203,7 +203,7 @@ const normalised_working_folder = (os.real_path(os.getwd()) + os.path_separator) pub fn (mut p Parser) set_path(path string) { p.file_path = path p.file_base = os.base(path) - p.file_display_path = os.real_path(p.file_path).replace_once(parser.normalised_working_folder, + p.file_display_path = os.real_path(p.file_path).replace_once(normalised_working_folder, '').replace('\\', '/') p.inside_vlib_file = os.dir(path).contains('vlib') p.inside_test_file = p.file_base.ends_with('_test.v') || p.file_base.ends_with('_test.vv') @@ -1259,8 +1259,8 @@ fn (mut p Parser) asm_stmt(is_top_level bool) ast.AsmStmt { p.next() has_suffix := p.tok.lit[p.tok.lit.len - 1] in [`b`, `w`, `l`, `q`] - if !(p.tok.lit in parser.allowed_lock_prefix_ins || (has_suffix - && p.tok.lit[0..p.tok.lit.len - 1] in parser.allowed_lock_prefix_ins)) { + if !(p.tok.lit in allowed_lock_prefix_ins + || (has_suffix && p.tok.lit[0..p.tok.lit.len - 1] in allowed_lock_prefix_ins)) { p.error('The lock prefix cannot be used on this instruction') } name += ' ' @@ -2545,7 +2545,7 @@ fn (mut p Parser) is_generic_cast() bool { break } - if i > 20 || tok.kind !in parser.valid_tokens_inside_types { + if i > 20 || tok.kind !in valid_tokens_inside_types { return false } } diff --git a/vlib/v/parser/tmpl.v b/vlib/v/parser/tmpl.v index 1e05071a1a5ca1..728bea79874754 100644 --- a/vlib/v/parser/tmpl.v +++ b/vlib/v/parser/tmpl.v @@ -76,7 +76,7 @@ fn is_html_open_tag(name string, s string) bool { fn insert_template_code(fn_name string, tmpl_str_start string, line string) string { // HTML, may include `@var` // escaped by cgen, unless it's a `vweb.RawHtml` string - trailing_bs 
:= parser.tmpl_str_end + 'sb_${fn_name}.write_u8(92)\n' + tmpl_str_start + trailing_bs := tmpl_str_end + 'sb_${fn_name}.write_u8(92)\n' + tmpl_str_start round1 := ['\\', '\\\\', r"'", "\\'", r'@', r'$'] round2 := [r'$$', r'\@', r'.$', r'.@'] mut rline := line.replace_each(round1).replace_each(round2) @@ -325,7 +325,7 @@ fn vweb_tmpl_${fn_name}() string { continue } if line.contains('@if ') { - source.writeln(parser.tmpl_str_end) + source.writeln(tmpl_str_end) pos := line.index('@if') or { continue } source.writeln('if ' + line[pos + 4..] + '{') source.writeln(tmpl_str_start) @@ -334,7 +334,7 @@ fn vweb_tmpl_${fn_name}() string { if line.contains('@end') { // Remove new line byte source.go_back(1) - source.writeln(parser.tmpl_str_end) + source.writeln(tmpl_str_end) source.writeln('}') source.writeln(tmpl_str_start) continue @@ -342,7 +342,7 @@ fn vweb_tmpl_${fn_name}() string { if line.contains('@else') { // Remove new line byte source.go_back(1) - source.writeln(parser.tmpl_str_end) + source.writeln(tmpl_str_end) pos := line.index('@else') or { continue } source.writeln('}' + line[pos + 1..] + '{') // source.writeln(' } else { ') @@ -355,7 +355,7 @@ fn vweb_tmpl_${fn_name}() string { if source.len > 1 { source.go_back(1) } - source.writeln(parser.tmpl_str_end) + source.writeln(tmpl_str_end) pos := line.index('@for') or { continue } source.writeln('for ' + line[pos + 4..] + '{') source.writeln(tmpl_str_start) @@ -459,7 +459,7 @@ fn vweb_tmpl_${fn_name}() string { } } - source.writeln(parser.tmpl_str_end) + source.writeln(tmpl_str_end) source.writeln('\t_tmpl_res_${fn_name} := sb_${fn_name}.str() ') source.writeln('\treturn _tmpl_res_${fn_name}') source.writeln('}') diff --git a/vlib/v/parser/v_parser_test.v b/vlib/v/parser/v_parser_test.v index ad3da8c3ea316d..0749e1032848aa 100644 --- a/vlib/v/parser/v_parser_test.v +++ b/vlib/v/parser/v_parser_test.v @@ -291,8 +291,8 @@ fn parse(output_mode pref.OutputMode) ! 
{ mut files := []string{} // mode_files := os.walk_ext(os.join_path(vroot, 'vlib/v/parser/testdata/${output_mode}'), '.vv') // files << mode_files - scan_v(mut files, os.join_path(parser.vroot, 'vlib'))! - scan_v(mut files, os.join_path(parser.vroot, 'cmd'))! + scan_v(mut files, os.join_path(vroot, 'vlib'))! + scan_v(mut files, os.join_path(vroot, 'cmd'))! mut pref_ := pref.new_preferences() pref_.output_mode = output_mode for idx, f in files { diff --git a/vlib/v/pkgconfig/pkgconfig.v b/vlib/v/pkgconfig/pkgconfig.v index f96d49e831f3cd..13271cce44ffb3 100644 --- a/vlib/v/pkgconfig/pkgconfig.v +++ b/vlib/v/pkgconfig/pkgconfig.v @@ -159,7 +159,7 @@ fn (mut pc PkgConfig) resolve(pkgname string) !string { } pub fn atleast(v string) bool { - v0 := semver.from(pkgconfig.version) or { return false } + v0 := semver.from(version) or { return false } v1 := semver.from(v) or { return false } return v0 > v1 } @@ -250,7 +250,7 @@ fn (mut pc PkgConfig) load_paths() { } } else { if pc.options.use_default_paths { - for path in pkgconfig.default_paths { + for path in default_paths { pc.add_path(path) } } diff --git a/vlib/v/pref/pref.v b/vlib/v/pref/pref.v index 682f88577fe1e9..2363bafa715c95 100644 --- a/vlib/v/pref/pref.v +++ b/vlib/v/pref/pref.v @@ -1205,7 +1205,7 @@ fn (mut prefs Preferences) parse_define(define string) { } pub fn supported_test_runners_list() string { - return pref.supported_test_runners.map('`${it}`').join(', ') + return supported_test_runners.map('`${it}`').join(', ') } pub fn (pref &Preferences) should_trace_fn_name(fname string) bool { diff --git a/vlib/v/scanner/scanner.v b/vlib/v/scanner/scanner.v index beea6fc05655d9..15147c7a533b0e 100644 --- a/vlib/v/scanner/scanner.v +++ b/vlib/v/scanner/scanner.v @@ -154,8 +154,8 @@ pub fn new_scanner(text string, comments_mode CommentsMode, pref_ &pref.Preferen is_print_rel_paths_on_error: true is_fmt: pref_.is_fmt comments_mode: comments_mode - file_path: scanner.internally_generated_v_code - file_base: 
scanner.internally_generated_v_code + file_path: internally_generated_v_code + file_base: internally_generated_v_code } s.scan_all_tokens_in_buffer() return s @@ -267,7 +267,7 @@ fn (s Scanner) num_lit(start int, end int) string { mut b := malloc_noscan(end - start + 1) // add a byte for the endstring 0 mut i_no_sep := 0 for i in start .. end { - if txt[i] != scanner.num_sep { + if txt[i] != num_sep { b[i_no_sep] = txt[i] i_no_sep++ } @@ -283,15 +283,15 @@ fn (mut s Scanner) ident_bin_number() string { mut first_wrong_digit := `\0` start_pos := s.pos s.pos += 2 // skip '0b' - if s.pos < s.text.len && s.text[s.pos] == scanner.num_sep { + if s.pos < s.text.len && s.text[s.pos] == num_sep { s.error('separator `_` is only valid between digits in a numeric literal') } for s.pos < s.text.len { c := s.text[s.pos] - if c == scanner.num_sep && s.text[s.pos - 1] == scanner.num_sep { + if c == num_sep && s.text[s.pos - 1] == num_sep { s.error('cannot use `_` consecutively') } - if !c.is_bin_digit() && c != scanner.num_sep { + if !c.is_bin_digit() && c != num_sep { if (!c.is_digit() && !c.is_letter()) || s.is_inside_string || s.is_nested_string { break } else if !has_wrong_digit { @@ -302,7 +302,7 @@ fn (mut s Scanner) ident_bin_number() string { } s.pos++ } - if s.text[s.pos - 1] == scanner.num_sep { + if s.text[s.pos - 1] == num_sep { s.pos-- s.error('cannot use `_` at the end of a numeric literal') } else if start_pos + 2 == s.pos { @@ -327,15 +327,15 @@ fn (mut s Scanner) ident_hex_number() string { return '0x' } s.pos += 2 // skip '0x' - if s.pos < s.text.len && s.text[s.pos] == scanner.num_sep { + if s.pos < s.text.len && s.text[s.pos] == num_sep { s.error('separator `_` is only valid between digits in a numeric literal') } for s.pos < s.text.len { c := s.text[s.pos] - if c == scanner.num_sep && s.text[s.pos - 1] == scanner.num_sep { + if c == num_sep && s.text[s.pos - 1] == num_sep { s.error('cannot use `_` consecutively') } - if !c.is_hex_digit() && c != 
scanner.num_sep { + if !c.is_hex_digit() && c != num_sep { if !c.is_letter() || s.is_inside_string || s.is_nested_string { break } else if !has_wrong_digit { @@ -346,7 +346,7 @@ fn (mut s Scanner) ident_hex_number() string { } s.pos++ } - if s.text[s.pos - 1] == scanner.num_sep { + if s.text[s.pos - 1] == num_sep { s.pos-- s.error('cannot use `_` at the end of a numeric literal') } else if start_pos + 2 == s.pos { @@ -367,15 +367,15 @@ fn (mut s Scanner) ident_oct_number() string { mut first_wrong_digit := `\0` start_pos := s.pos s.pos += 2 // skip '0o' - if s.pos < s.text.len && s.text[s.pos] == scanner.num_sep { + if s.pos < s.text.len && s.text[s.pos] == num_sep { s.error('separator `_` is only valid between digits in a numeric literal') } for s.pos < s.text.len { c := s.text[s.pos] - if c == scanner.num_sep && s.text[s.pos - 1] == scanner.num_sep { + if c == num_sep && s.text[s.pos - 1] == num_sep { s.error('cannot use `_` consecutively') } - if !c.is_oct_digit() && c != scanner.num_sep { + if !c.is_oct_digit() && c != num_sep { if (!c.is_digit() && !c.is_letter()) || s.is_inside_string || s.is_nested_string { break } else if !has_wrong_digit { @@ -386,7 +386,7 @@ fn (mut s Scanner) ident_oct_number() string { } s.pos++ } - if s.text[s.pos - 1] == scanner.num_sep { + if s.text[s.pos - 1] == num_sep { s.pos-- s.error('cannot use `_` at the end of a numeric literal') } else if start_pos + 2 == s.pos { @@ -410,10 +410,10 @@ fn (mut s Scanner) ident_dec_number() string { // scan integer part for s.pos < s.text.len { c := s.text[s.pos] - if c == scanner.num_sep && s.text[s.pos - 1] == scanner.num_sep { + if c == num_sep && s.text[s.pos - 1] == num_sep { s.error('cannot use `_` consecutively') } - if !c.is_digit() && c != scanner.num_sep { + if !c.is_digit() && c != num_sep { if !c.is_letter() || c in [`e`, `E`] || s.is_inside_string || s.is_nested_string { break } else if !has_wrong_digit { @@ -424,7 +424,7 @@ fn (mut s Scanner) ident_dec_number() string { } s.pos++ 
} - if s.text[s.pos - 1] == scanner.num_sep { + if s.text[s.pos - 1] == num_sep { s.pos-- s.error('cannot use `_` at the end of a numeric literal') } @@ -548,15 +548,15 @@ fn (mut s Scanner) skip_whitespace() { if util.non_whitespace_table[c] { return } - c_is_nl := c == scanner.b_cr || c == scanner.b_lf + c_is_nl := c == b_cr || c == b_lf if c_is_nl && s.is_vh { return } - if s.pos + 1 < s.text.len && c == scanner.b_cr && s.text[s.pos + 1] == scanner.b_lf { + if s.pos + 1 < s.text.len && c == b_cr && s.text[s.pos + 1] == b_lf { s.is_crlf = true } // Count \r\n as one line - if c_is_nl && !(s.pos > 0 && s.text[s.pos - 1] == scanner.b_cr && c == scanner.b_lf) { + if c_is_nl && !(s.pos > 0 && s.text[s.pos - 1] == b_cr && c == b_lf) { s.inc_line_number() } s.pos++ @@ -567,7 +567,7 @@ fn (mut s Scanner) end_of_file() token.Token { s.eofs++ if s.eofs > s.max_eofs { s.line_nr-- - if s.file_path == scanner.internally_generated_v_code { + if s.file_path == internally_generated_v_code { // show a bit more context for that case, since the source may not be easily visible by just inspecting a source file on the filesystem dump(s.text#[0..50]) dump(s.text#[-50..]) @@ -804,7 +804,7 @@ pub fn (mut s Scanner) text_scan() token.Token { `?` { return s.new_token(.question, '?', 1) } - scanner.single_quote, scanner.double_quote { + single_quote, double_quote { start_line := s.line_nr ident_string := s.ident_string() return s.new_multiline_token(.string, ident_string, ident_string.len + 2, @@ -1091,7 +1091,7 @@ pub fn (mut s Scanner) text_scan() token.Token { start := s.pos + 1 s.ignore_line() mut comment_line_end := s.pos - if s.text[s.pos - 1] == scanner.b_cr { + if s.text[s.pos - 1] == b_cr { comment_line_end-- } else { // fix line_nr, \n was read; the comment is marked on the next line @@ -1103,7 +1103,7 @@ pub fn (mut s Scanner) text_scan() token.Token { mut comment := s.line_comment // Find out if this comment is on its own line (for vfmt) mut is_separate_line_comment := true - 
for j := start - 2; j >= 0 && s.text[j] != scanner.b_lf; j-- { + for j := start - 2; j >= 0 && s.text[j] != b_lf; j-- { if s.text[j] !in [`\t`, ` `] { is_separate_line_comment = false } @@ -1129,7 +1129,7 @@ pub fn (mut s Scanner) text_scan() token.Token { s.line_nr = start_line s.error('unterminated multiline comment') } - if s.text[s.pos] == scanner.b_lf { + if s.text[s.pos] == b_lf { s.inc_line_number() continue } @@ -1215,7 +1215,7 @@ pub fn (mut s Scanner) ident_string() string { col: s.pos - s.last_nl_pos - 1 } q := s.text[s.pos] - is_quote := q in [scanner.single_quote, scanner.double_quote] + is_quote := q in [single_quote, double_quote] is_raw := is_quote && s.pos > 0 && s.text[s.pos - 1] == `r` && !s.is_inside_string is_cstr := is_quote && s.pos > 0 && s.text[s.pos - 1] == `c` && !s.is_inside_string // don't interpret quote as "start of string" quote when a string interpolation has @@ -1234,14 +1234,14 @@ pub fn (mut s Scanner) ident_string() string { if start_char == s.quote || (start_char == s.inter_quote && (s.is_inter_start || s.is_enclosed_inter)) { start++ - } else if start_char == scanner.b_lf { + } else if start_char == b_lf { s.inc_line_number() } s.is_inside_string = false s.u16_escapes_pos.clear() s.u32_escapes_pos.clear() s.h_escapes_pos.clear() - mut backslash_count := if start_char == scanner.backslash { 1 } else { 0 } + mut backslash_count := if start_char == backslash { 1 } else { 0 } for { s.pos++ if s.pos >= s.text.len { @@ -1253,7 +1253,7 @@ pub fn (mut s Scanner) ident_string() string { } c := s.text[s.pos] prevc := s.text[s.pos - 1] - if c == scanner.backslash { + if c == backslash { backslash_count++ } // end of string @@ -1264,10 +1264,10 @@ pub fn (mut s Scanner) ident_string() string { if c == s.inter_quote && (s.is_inter_start || s.is_enclosed_inter) { break } - if c == scanner.b_cr { + if c == b_cr { n_cr_chars++ } - if c == scanner.b_lf { + if c == b_lf { s.inc_line_number() } // Escape `\x` `\u` `\U` @@ -1311,7 +1311,7 @@ pub 
fn (mut s Scanner) ident_string() string { } // ${var} (ignore in vfmt mode) (skip \$) if prevc == `$` && c == `{` && !is_raw - && s.count_symbol_before(s.pos - 2, scanner.backslash) & 1 == 0 { + && s.count_symbol_before(s.pos - 2, backslash) & 1 == 0 { s.is_inside_string = true if s.is_enclosed_inter { s.is_nested_enclosed_inter = true @@ -1324,13 +1324,13 @@ pub fn (mut s Scanner) ident_string() string { } // $var if prevc == `$` && util.name_char_table[c] && !is_raw - && s.count_symbol_before(s.pos - 2, scanner.backslash) & 1 == 0 { + && s.count_symbol_before(s.pos - 2, backslash) & 1 == 0 { s.is_inside_string = true s.is_inter_start = true s.pos -= 2 break } - if c != scanner.backslash { + if c != backslash { backslash_count = 0 } } @@ -1686,7 +1686,7 @@ fn (mut s Scanner) ignore_line() { @[direct_array_access; inline] fn (mut s Scanner) eat_to_end_of_line() { - for s.pos < s.text.len && s.text[s.pos] != scanner.b_lf { + for s.pos < s.text.len && s.text[s.pos] != b_lf { s.pos++ } } diff --git a/vlib/v/tests/bench/math_big_gcd/prime/maker.v b/vlib/v/tests/bench/math_big_gcd/prime/maker.v index 3c9d62f420c897..8b3f87c5fd1531 100644 --- a/vlib/v/tests/bench/math_big_gcd/prime/maker.v +++ b/vlib/v/tests/bench/math_big_gcd/prime/maker.v @@ -74,7 +74,7 @@ pub fn usage() string { // reads the Map[string] []string from disk // and returns the parsed content fn read_toml_file() map[string][]string { - fp := os.join_path(@VROOT, prime.toml_path) + fp := os.join_path(@VROOT, toml_path) tm_doc := toml.parse_file(fp) or { err_msg := 'expected ${fp}' diff --git a/vlib/v/tests/create_dll/create_win_dll.c.v b/vlib/v/tests/create_dll/create_win_dll.c.v index eedd0e0c79dab3..10a15b4eb10402 100644 --- a/vlib/v/tests/create_dll/create_win_dll.c.v +++ b/vlib/v/tests/create_dll/create_win_dll.c.v @@ -8,7 +8,7 @@ const bar = (foo << 5) + 9 @[export: Tatltuae] pub fn test_tatltuae() int { - return test.foo + test.bar + return foo + bar } @[callconv: stdcall] diff --git 
a/vlib/v/tests/testdata/modules_in_src/modules/somemoduletwo/somemoduletwo.v b/vlib/v/tests/testdata/modules_in_src/modules/somemoduletwo/somemoduletwo.v index 20b677e1b2939c..457675ebcfc15c 100644 --- a/vlib/v/tests/testdata/modules_in_src/modules/somemoduletwo/somemoduletwo.v +++ b/vlib/v/tests/testdata/modules_in_src/modules/somemoduletwo/somemoduletwo.v @@ -3,5 +3,5 @@ module somemoduletwo const name = 'somemoduletwo' pub fn name() string { - return somemoduletwo.name + return name } diff --git a/vlib/v/tests/testdata/modules_in_src/src/modules/somemodule/somemodule.v b/vlib/v/tests/testdata/modules_in_src/src/modules/somemodule/somemodule.v index 832dec788e985c..e4c843856d4b1b 100644 --- a/vlib/v/tests/testdata/modules_in_src/src/modules/somemodule/somemodule.v +++ b/vlib/v/tests/testdata/modules_in_src/src/modules/somemodule/somemodule.v @@ -3,5 +3,5 @@ module somemodule const name = 'somemodule' pub fn name() string { - return somemodule.name + return name } diff --git a/vlib/v/token/token.v b/vlib/v/token/token.v index 82dda6d8daf2a2..160b015525cbe2 100644 --- a/vlib/v/token/token.v +++ b/vlib/v/token/token.v @@ -206,10 +206,10 @@ pub const scanner_matcher = new_keywords_matcher_trie[Kind](keywords) fn build_keys() map[string]Kind { mut res := map[string]Kind{} for t in int(Kind.keyword_beg) + 1 .. 
int(Kind.keyword_end) { - key := token.token_str[t] + key := token_str[t] // Exclude custom ORM operators from V keyword list - if key in token.orm_custom_operators { + if key in orm_custom_operators { continue } @@ -357,7 +357,7 @@ fn build_token_str() []string { @[inline] pub fn is_key(key string) bool { - return int(token.keywords[key]) > 0 + return int(keywords[key]) > 0 } @[inline] @@ -368,17 +368,17 @@ pub fn is_decl(t Kind) bool { @[inline] pub fn (t Kind) is_assign() bool { - return t in token.assign_tokens + return t in assign_tokens } // note: used for some code generation, so no quoting @[inline] pub fn (t Kind) str() string { idx := int(t) - if idx < 0 || token.token_str.len <= idx { + if idx < 0 || token_str.len <= idx { return 'unknown' } - return token.token_str[idx] + return token_str[idx] } @[inline] @@ -491,13 +491,13 @@ const precedences = build_precedences() // precedence returns a tokens precedence if defined, otherwise 0 @[direct_array_access; inline] pub fn (tok Token) precedence() int { - return int(token.precedences[tok.kind]) + return int(precedences[tok.kind]) } // precedence returns the precedence of the given token `kind` if defined, otherwise 0 @[direct_array_access; inline] pub fn (kind Kind) precedence() int { - return int(token.precedences[kind]) + return int(precedences[kind]) } // is_scalar returns true if the token is a scalar diff --git a/vlib/v/util/diff/diff.v b/vlib/v/util/diff/diff.v index 9b0f588a2f50cd..e6b6c4204aafdc 100644 --- a/vlib/v/util/diff/diff.v +++ b/vlib/v/util/diff/diff.v @@ -72,7 +72,7 @@ pub fn compare_files(path1 string, path2 string, opts CompareOptions) !string { tool, cmd := opts.find_tool()! mut args := opts.args if args == '' { - args = if defaults := diff.known_diff_tool_defaults[tool] { defaults } else { '' } + args = if defaults := known_diff_tool_defaults[tool] { defaults } else { '' } if opts.tool == .diff { // Ensure that the diff command supports the color option. 
// E.g., some BSD installations or macOS diff (based on FreeBSD diff) @@ -133,7 +133,7 @@ fn (opts CompareOptions) find_tool() !(DiffTool, string) { // are used. Using a public constant will also allow for external checking of available tools. fn find_working_diff_tools() []DiffTool { mut tools := []DiffTool{} - for tool in diff.known_diff_tool_defaults.keys() { + for tool in known_diff_tool_defaults.keys() { cmd := tool.cmd() os.find_abs_path_of_executable(cmd) or { continue } if tool == .delta { @@ -203,12 +203,12 @@ pub fn color_compare_files(diff_cmd string, path1 string, path2 string) string { if tool == 'diff' { // Ensure that the diff command supports the color option. // E.g., some BSD installations do not include `diffutils` as a core package alongside `diff`. - res := os.execute('${diff_cmd} --color=always ${diff.default_diff_args} ${p1} ${p2}') + res := os.execute('${diff_cmd} --color=always ${default_diff_args} ${p1} ${p2}') if !res.output.starts_with('diff: unrecognized option') { return res.output.trim_right('\r\n') } } - cmd := '${diff_cmd} ${diff.default_diff_args} ${p1} ${p2}' + cmd := '${diff_cmd} ${default_diff_args} ${p1} ${p2}' return os.execute(cmd).output.trim_right('\r\n') } diff --git a/vlib/v/util/errors.v b/vlib/v/util/errors.v index 0590da188220bd..6c8df2a77af6fa 100644 --- a/vlib/v/util/errors.v +++ b/vlib/v/util/errors.v @@ -47,14 +47,14 @@ pub fn (e &EManager) set_support_color(b bool) { } pub fn bold(msg string) string { - if !util.emanager.support_color { + if !emanager.support_color { return msg } return term.bold(msg) } pub fn color(kind string, msg string) string { - if !util.emanager.support_color { + if !emanager.support_color { return msg } if kind.contains('error') { @@ -95,11 +95,11 @@ const verror_paths_absolute = os.getenv('VERROR_PATHS') == 'absolute' pub fn path_styled_for_error_messages(path string) string { mut rpath := os.real_path(path) rpath = rpath.replace('\\', '/') - if util.verror_paths_absolute { + if 
verror_paths_absolute { return rpath } - if rpath.starts_with(util.normalised_workdir) { - rpath = rpath.replace_once(util.normalised_workdir, '') + if rpath.starts_with(normalised_workdir) { + rpath = rpath.replace_once(normalised_workdir, '') } return rpath } @@ -152,8 +152,8 @@ pub fn source_file_context(kind string, filepath string, pos token.Pos) []string if source_lines.len == 0 { return clines } - bline := mu.max(0, pos.line_nr - util.error_context_before) - aline := mu.max(0, mu.min(source_lines.len - 1, pos.line_nr + util.error_context_after)) + bline := mu.max(0, pos.line_nr - error_context_before) + aline := mu.max(0, mu.min(source_lines.len - 1, pos.line_nr + error_context_after)) tab_spaces := ' ' for iline := bline; iline <= aline; iline++ { sline := source_lines[iline] or { '' } diff --git a/vlib/v/util/quote.v b/vlib/v/util/quote.v index d3f0784ba88234..2c4fc36338d6ce 100644 --- a/vlib/v/util/quote.v +++ b/vlib/v/util/quote.v @@ -62,31 +62,31 @@ pub fn smart_quote(str string, raw bool) string { } else { next = 0 } - if current == util.double_quote { + if current == double_quote { current = 0 - result.write_u8(util.backslash) - result.write_u8(util.double_quote) + result.write_u8(backslash) + result.write_u8(double_quote) continue } - if current == util.backslash { + if current == backslash { if raw { - result.write_string(util.double_escape) + result.write_string(double_escape) continue } - if next == util.backslash { + if next == backslash { // escaped backslash - keep as is current = 0 skip_next = true - result.write_string(util.double_escape) + result.write_string(double_escape) continue } if next != 0 { if raw { skip_next = true - result.write_string(util.double_escape) + result.write_string(double_escape) continue } - if next in util.invalid_escapes { + if next in invalid_escapes { current = 0 skip_next = true result.write_u8(next) @@ -100,14 +100,14 @@ pub fn smart_quote(str string, raw bool) string { continue } } - if current == 
util.backslash_n { + if current == backslash_n { // keep newlines in string current = 0 - result.write_u8(util.backslash) + result.write_u8(backslash) result.write_u8(`n`) continue } - if current == util.backslash_r && next == util.backslash_n { + if current == backslash_r && next == backslash_n { result.write_u8(current) result.write_u8(next) current = 0 @@ -116,7 +116,7 @@ pub fn smart_quote(str string, raw bool) string { } if !raw { if current == `$` { - if last == util.backslash { + if last == backslash { result.write_u8(last) result.write_u8(current) continue diff --git a/vlib/v/util/scanning.v b/vlib/v/util/scanning.v index a407a18d2b3c47..370c31545dd442 100644 --- a/vlib/v/util/scanning.v +++ b/vlib/v/util/scanning.v @@ -32,12 +32,12 @@ fn get_func_char_table() [256]bool { @[direct_array_access; inline] pub fn is_name_char(c u8) bool { - return util.name_char_table[c] + return name_char_table[c] } @[direct_array_access; inline] pub fn is_func_char(c u8) bool { - return util.func_char_table[c] + return func_char_table[c] } pub fn contains_capital(s string) bool { diff --git a/vlib/v/util/util.v b/vlib/v/util/util.v index 1b5570798537d7..d132647ba5e9f0 100644 --- a/vlib/v/util/util.v +++ b/vlib/v/util/util.v @@ -39,12 +39,12 @@ const const_tabs = [ pub const nr_jobs = runtime.nr_jobs() pub fn module_is_builtin(mod string) bool { - return mod in util.builtin_module_parts + return mod in builtin_module_parts } @[direct_array_access] pub fn tabs(n int) string { - return if n >= 0 && n < util.const_tabs.len { util.const_tabs[n] } else { '\t'.repeat(n) } + return if n >= 0 && n < const_tabs.len { const_tabs[n] } else { '\t'.repeat(n) } } // @@ -120,14 +120,14 @@ const d_sig = "\$d('" // resolve_d_value replaces all occurrences of `$d('ident','value')` // in `str` with either the default `'value'` param or a compile value passed via `-d ident=value`. 
pub fn resolve_d_value(compile_values map[string]string, str string) !string { - at := str.index(util.d_sig) or { - return error('no "${util.d_sig}' + '...\')" could be found in "${str}".') + at := str.index(d_sig) or { + return error('no "${d_sig}' + '...\')" could be found in "${str}".') } - mut all_parsed := util.d_sig + mut all_parsed := d_sig mut ch := u8(`.`) mut d_ident := '' mut i := 0 - for i = at + util.d_sig.len; i < str.len && ch != `'`; i++ { + for i = at + d_sig.len; i < str.len && ch != `'`; i++ { ch = u8(str[i]) all_parsed += ch.ascii_str() if ch.is_letter() || ch.is_digit() || ch == `_` { @@ -174,7 +174,7 @@ pub fn resolve_d_value(compile_values map[string]string, str string) !string { d_value := compile_values[d_ident] or { d_default_value } // if more `$d()` calls remains, resolve those as well: rep := str.replace_once(all_parsed + ')', d_value) - if rep.contains(util.d_sig) { + if rep.contains(d_sig) { return resolve_d_value(compile_values, rep) } return rep @@ -230,7 +230,7 @@ pub fn launch_tool(is_verbose bool, tool_name string, args []string) { println('launch_tool should_compile: ${should_compile}') } if should_compile { - emodules := util.external_module_dependencies_for_tool[tool_name] + emodules := external_module_dependencies_for_tool[tool_name] for emodule in emodules { check_module_is_installed(emodule, is_verbose, false) or { panic(err) } } @@ -537,7 +537,7 @@ and the existing module `${modulename}` may still work.') } pub fn ensure_modules_for_all_tools_are_installed(is_verbose bool) { - for tool_name, tool_modules in util.external_module_dependencies_for_tool { + for tool_name, tool_modules in external_module_dependencies_for_tool { if is_verbose { eprintln('Installing modules for tool: ${tool_name} ...') } @@ -568,9 +568,9 @@ const map_prefix = 'map[string]' pub fn no_cur_mod(typename string, cur_mod string) string { mut res := typename mod_prefix := cur_mod + '.' 
- has_map_prefix := res.starts_with(util.map_prefix) + has_map_prefix := res.starts_with(map_prefix) if has_map_prefix { - res = res.replace_once(util.map_prefix, '') + res = res.replace_once(map_prefix, '') } no_symbols := res.trim_left('&[]') should_shorten := no_symbols.starts_with(mod_prefix) @@ -578,7 +578,7 @@ pub fn no_cur_mod(typename string, cur_mod string) string { res = res.replace_once(mod_prefix, '') } if has_map_prefix { - res = util.map_prefix + res + res = map_prefix + res } return res } @@ -613,8 +613,7 @@ pub fn get_vtmp_folder() string { } pub fn should_bundle_module(mod string) bool { - return mod in util.bundle_modules - || (mod.contains('.') && mod.all_before('.') in util.bundle_modules) + return mod in bundle_modules || (mod.contains('.') && mod.all_before('.') in bundle_modules) } // find_all_v_files - given a list of files/folders, finds all .v/.vsh files diff --git a/vlib/v/util/version/version.v b/vlib/v/util/version/version.v index 51fb5c779d894d..78723c88380f75 100644 --- a/vlib/v/util/version/version.v +++ b/vlib/v/util/version/version.v @@ -16,9 +16,9 @@ pub fn full_hash() string { // full_v_version() returns the full version of the V compiler pub fn full_v_version(is_verbose bool) string { if is_verbose { - return 'V ${version.v_version} ${full_hash()}' + return 'V ${v_version} ${full_hash()}' } - return 'V ${version.v_version} ${@VCURRENTHASH}' + return 'V ${v_version} ${@VCURRENTHASH}' } // githash tries to find the current git commit hash for the specified diff --git a/vlib/v/vmod/parser.v b/vlib/v/vmod/parser.v index 7b9b5b83bb9830..376105fe08b151 100644 --- a/vlib/v/vmod/parser.v +++ b/vlib/v/vmod/parser.v @@ -162,7 +162,7 @@ fn get_array_content(tokens []Token, st_idx int) !([]string, int) { mut vals := []string{} mut idx := st_idx if tokens[idx].typ != .labr { - return error('${vmod.err_label} not a valid array, at line ${tokens[idx].line}') + return error('${err_label} not a valid array, at line ${tokens[idx].line}') } idx++ 
for { @@ -171,7 +171,7 @@ fn get_array_content(tokens []Token, st_idx int) !([]string, int) { .str { vals << tok.val if tokens[idx + 1].typ !in [.comma, .rabr] { - return error('${vmod.err_label} invalid separator "${tokens[idx + 1].val}", at line ${tok.line}') + return error('${err_label} invalid separator "${tokens[idx + 1].val}", at line ${tok.line}') } idx += if tokens[idx + 1].typ == .comma { 2 } else { 1 } } @@ -180,7 +180,7 @@ fn get_array_content(tokens []Token, st_idx int) !([]string, int) { break } else { - return error('${vmod.err_label} invalid token "${tok.val}", at line ${tok.line}') + return error('${err_label} invalid token "${tok.val}", at line ${tok.line}') } } } @@ -189,13 +189,13 @@ fn get_array_content(tokens []Token, st_idx int) !([]string, int) { fn (mut p Parser) parse() !Manifest { if p.scanner.text.len == 0 { - return error('${vmod.err_label} no content.') + return error('${err_label} no content.') } p.scanner.scan_all() tokens := p.scanner.tokens mut mn := Manifest{} if tokens[0].typ != .module_keyword { - return error('${vmod.err_label} v.mod files should start with Module, at line ${tokens[0].line}') + return error('${err_label} v.mod files should start with Module, at line ${tokens[0].line}') } mut i := 1 for i < tokens.len { @@ -203,7 +203,7 @@ fn (mut p Parser) parse() !Manifest { match tok.typ { .lcbr { if tokens[i + 1].typ !in [.field_key, .rcbr] { - return error('${vmod.err_label} invalid content after opening brace, at line ${tok.line}') + return error('${err_label} invalid content after opening brace, at line ${tok.line}') } i++ continue @@ -214,7 +214,7 @@ fn (mut p Parser) parse() !Manifest { .field_key { field_name := tok.val.trim_right(':') if tokens[i + 1].typ !in [.str, .labr] { - return error('${vmod.err_label} value of field "${field_name}" must be either string or an array of strings, at line ${tok.line}') + return error('${err_label} value of field "${field_name}" must be either string or an array of strings, at line 
${tok.line}') } field_value := tokens[i + 1].val match field_name { @@ -257,13 +257,13 @@ fn (mut p Parser) parse() !Manifest { } .comma { if tokens[i - 1].typ !in [.str, .rabr] || tokens[i + 1].typ != .field_key { - return error('${vmod.err_label} invalid comma placement, at line ${tok.line}') + return error('${err_label} invalid comma placement, at line ${tok.line}') } i++ continue } else { - return error('${vmod.err_label} invalid token "${tok.val}", at line ${tok.line}') + return error('${err_label} invalid token "${tok.val}", at line ${tok.line}') } } } diff --git a/vlib/v/vmod/vmod.v b/vlib/v/vmod/vmod.v index c7a59147af4cba..f62e8a8e5caec7 100644 --- a/vlib/v/vmod/vmod.v +++ b/vlib/v/vmod/vmod.v @@ -8,7 +8,7 @@ const mod_file_stop_paths = ['.git', '.hg', '.svn', '.v.mod.stop'] const private_file_cacher = new_mod_file_cacher() pub fn get_cache() &ModFileCacher { - return vmod.private_file_cacher + return private_file_cacher } // This file provides a caching mechanism for seeking quickly whether a @@ -144,7 +144,7 @@ fn (mut mcache ModFileCacher) mark_folders_as_vmod_free(folders_so_far []string) } fn (mcache &ModFileCacher) check_for_stop(cfolder string, files []string) bool { - for i in vmod.mod_file_stop_paths { + for i in mod_file_stop_paths { if i in files { return true } diff --git a/vlib/v2/ast/ast.v b/vlib/v2/ast/ast.v index 054478c10b3ba4..adb2e4a94ca94a 100644 --- a/vlib/v2/ast/ast.v +++ b/vlib/v2/ast/ast.v @@ -229,11 +229,11 @@ pub fn (lang Language) str() string { // Expressions pub struct ArrayInitExpr { pub: - typ Expr = ast.empty_expr + typ Expr = empty_expr exprs []Expr - init Expr = ast.empty_expr - cap Expr = ast.empty_expr - len Expr = ast.empty_expr + init Expr = empty_expr + cap Expr = empty_expr + len Expr = empty_expr pos token.Pos } @@ -287,8 +287,8 @@ pub: pub struct FieldDecl { pub: name string - typ Expr = ast.empty_expr // can be empty as used for const (unless we use something else) - value Expr = ast.empty_expr + typ Expr = 
empty_expr // can be empty as used for const (unless we use something else) + value Expr = empty_expr attributes []Attribute } @@ -326,8 +326,8 @@ pub: pub struct IfExpr { pub: - cond Expr = ast.empty_expr - else_expr Expr = ast.empty_expr + cond Expr = empty_expr + else_expr Expr = empty_expr stmts []Stmt } @@ -388,7 +388,7 @@ pub: pub struct MapInitExpr { pub: - typ Expr = ast.empty_expr + typ Expr = empty_expr keys []Expr vals []Expr pos token.Pos @@ -477,7 +477,7 @@ pub: pos token.Pos stmt Stmt stmts []Stmt - next Expr = ast.empty_expr + next Expr = empty_expr } pub struct SelectorExpr { @@ -562,7 +562,7 @@ pub: expr Expr // TEMP: prob removed once individual // fields are set, precision etc - format_expr Expr = ast.empty_expr + format_expr Expr = empty_expr } pub enum StringInterFormat { @@ -630,7 +630,7 @@ pub: pub struct AssertStmt { pub: expr Expr - extra Expr = ast.empty_expr + extra Expr = empty_expr } pub struct AssignStmt { @@ -690,7 +690,7 @@ pub: attributes []Attribute is_public bool name string - as_type Expr = ast.empty_expr + as_type Expr = empty_expr fields []FieldDecl } @@ -726,9 +726,9 @@ pub: pub struct ForStmt { pub: - init Stmt = ast.empty_stmt // initialization - cond Expr = ast.empty_expr // condition - post Stmt = ast.empty_stmt // post iteration (afterthought) + init Stmt = empty_stmt // initialization + cond Expr = empty_expr // condition + post Stmt = empty_stmt // post iteration (afterthought) stmts []Stmt } @@ -740,7 +740,7 @@ pub: // value_is_mut bool // expr Expr // TODO: - key Expr = ast.empty_expr + key Expr = empty_expr value Expr expr Expr } @@ -772,7 +772,7 @@ pub: pub struct LabelStmt { pub: name string - stmt Stmt = ast.empty_stmt + stmt Stmt = empty_stmt } pub struct ModuleStmt { @@ -803,7 +803,7 @@ pub: language Language name string generic_params []Expr - base_type Expr = ast.empty_expr + base_type Expr = empty_expr variants []Expr } @@ -827,14 +827,14 @@ pub: pub struct ThreadType { pub: - elem_type Expr = ast.empty_expr 
+ elem_type Expr = empty_expr } pub struct FnType { pub: generic_params []Expr params []Parameter - return_type Expr = ast.empty_expr + return_type Expr = empty_expr } pub fn (ft &FnType) str() string { @@ -890,12 +890,12 @@ pub struct NoneType {} pub struct OptionType { pub: - base_type Expr = ast.empty_expr + base_type Expr = empty_expr } pub struct ResultType { pub: - base_type Expr = ast.empty_expr + base_type Expr = empty_expr } pub struct TupleType { diff --git a/vlib/v2/gen/v/gen.v b/vlib/v2/gen/v/gen.v index 46e8bba05d05fb..0afc758f4f727a 100644 --- a/vlib/v2/gen/v/gen.v +++ b/vlib/v2/gen/v/gen.v @@ -988,7 +988,7 @@ fn (mut g Gen) generic_list(exprs []ast.Expr) { @[inline] fn (mut g Gen) write(str string) { if g.on_newline { - g.out.write_string(v.tabs[g.indent]) + g.out.write_string(tabs[g.indent]) } g.out.write_string(str) g.on_newline = false @@ -997,7 +997,7 @@ fn (mut g Gen) write(str string) { @[inline] fn (mut g Gen) writeln(str string) { if g.on_newline { - g.out.write_string(v.tabs[g.indent]) + g.out.write_string(tabs[g.indent]) } g.out.writeln(str) g.on_newline = true diff --git a/vlib/v2/types/universe.v b/vlib/v2/types/universe.v index e0f5adef6ee27c..40ea3581d1d4e9 100644 --- a/vlib/v2/types/universe.v +++ b/vlib/v2/types/universe.v @@ -104,35 +104,35 @@ const thread_ = Thread{} pub fn init_universe() &Scope { // universe scope mut universe_ := new_scope(unsafe { nil }) - universe_.insert('bool', Type(types.bool_)) - universe_.insert('i8', Type(types.i8_)) - universe_.insert('i16', Type(types.i16_)) - universe_.insert('i32', Type(types.i32_)) - universe_.insert('int', Type(types.int_)) - universe_.insert('i64', Type(types.i64_)) - universe_.insert('u8', Type(types.u8_)) - universe_.insert('byte', Type(types.byte_)) - universe_.insert('u16', Type(types.u16_)) - universe_.insert('u32', Type(types.u32_)) - universe_.insert('u64', Type(types.u64_)) - universe_.insert('f32', Type(types.f32_)) - universe_.insert('f64', Type(types.f64_)) + 
universe_.insert('bool', Type(bool_)) + universe_.insert('i8', Type(i8_)) + universe_.insert('i16', Type(i16_)) + universe_.insert('i32', Type(i32_)) + universe_.insert('int', Type(int_)) + universe_.insert('i64', Type(i64_)) + universe_.insert('u8', Type(u8_)) + universe_.insert('byte', Type(byte_)) + universe_.insert('u16', Type(u16_)) + universe_.insert('u32', Type(u32_)) + universe_.insert('u64', Type(u64_)) + universe_.insert('f32', Type(f32_)) + universe_.insert('f64', Type(f64_)) // TODO: - universe_.insert('string', Type(types.string_)) - universe_.insert('chan', Type(types.chan_)) - universe_.insert('char', Type(types.char_)) - universe_.insert('isize', Type(types.isize_)) - universe_.insert('usize', Type(types.usize_)) - universe_.insert('rune', Type(types.rune_)) - universe_.insert('void', Type(types.void_)) - universe_.insert('nil', Type(types.nil_)) - universe_.insert('none', Type(types.nil_)) - universe_.insert('byteptr', Type(types.byteptr_)) - universe_.insert('charptr', Type(types.charptr_)) - universe_.insert('voidptr', Type(types.voidptr_)) - universe_.insert('int_literal', Type(types.int_literal_)) - universe_.insert('float_literal', Type(types.float_literal_)) - universe_.insert('float_literal', Type(types.float_literal_)) - universe_.insert('thread', Type(types.thread_)) + universe_.insert('string', Type(string_)) + universe_.insert('chan', Type(chan_)) + universe_.insert('char', Type(char_)) + universe_.insert('isize', Type(isize_)) + universe_.insert('usize', Type(usize_)) + universe_.insert('rune', Type(rune_)) + universe_.insert('void', Type(void_)) + universe_.insert('nil', Type(nil_)) + universe_.insert('none', Type(nil_)) + universe_.insert('byteptr', Type(byteptr_)) + universe_.insert('charptr', Type(charptr_)) + universe_.insert('voidptr', Type(voidptr_)) + universe_.insert('int_literal', Type(int_literal_)) + universe_.insert('float_literal', Type(float_literal_)) + universe_.insert('float_literal', Type(float_literal_)) + 
universe_.insert('thread', Type(thread_)) return universe_ } diff --git a/vlib/veb/auth/auth.v b/vlib/veb/auth/auth.v index ea4cab3635da7e..1da3225dbfc38b 100644 --- a/vlib/veb/auth/auth.v +++ b/vlib/veb/auth/auth.v @@ -72,7 +72,7 @@ pub fn set_rand_crypto_safe_seed() { } fn generate_crypto_safe_int_u32() u32 { - return u32(crypto_rand.int_u64(auth.max_safe_unsigned_integer) or { 0 }) + return u32(crypto_rand.int_u64(max_safe_unsigned_integer) or { 0 }) } pub fn generate_salt() string { diff --git a/vlib/veb/middleware.v b/vlib/veb/middleware.v index 3365f6bcbab67c..f68823a53050aa 100644 --- a/vlib/veb/middleware.v +++ b/vlib/veb/middleware.v @@ -225,7 +225,7 @@ pub fn (options &CorsOptions) set_headers(mut ctx Context) { } else if _ := ctx.req.header.get(.access_control_request_headers) { // a server must respond with `Access-Control-Allow-Headers` if // `Access-Control-Request-Headers` is present in a preflight request - ctx.set_header(.access_control_allow_headers, veb.cors_safelisted_response_headers.join(',')) + ctx.set_header(.access_control_allow_headers, cors_safelisted_response_headers.join(',')) } if options.allowed_methods.len > 0 { diff --git a/vlib/veb/parse.v b/vlib/veb/parse.v index 314527a54d04a4..f854f33bcb7162 100644 --- a/vlib/veb/parse.v +++ b/vlib/veb/parse.v @@ -77,11 +77,11 @@ fn parse_form_from_request(request http.Request) !(map[string]string, map[string } ct := request.header.get(.content_type) or { '' }.split(';').map(it.trim_left(' \t')) if 'multipart/form-data' in ct { - boundaries := ct.filter(it.starts_with(veb.boundary_start)) + boundaries := ct.filter(it.starts_with(boundary_start)) if boundaries.len != 1 { return error('detected more that one form-data boundary') } - boundary := boundaries[0].all_after(veb.boundary_start) + boundary := boundaries[0].all_after(boundary_start) if boundary.len > 0 && boundary[0] == `"` { // quotes are send by our http.post_multipart_form/2: return http.parse_multipart_form(request.data, 
boundary.trim('"')) diff --git a/vlib/veb/veb_livereload.v b/vlib/veb/veb_livereload.v index 228bf94c6ca755..305c6f9cb8f526 100644 --- a/vlib/veb/veb_livereload.v +++ b/vlib/veb/veb_livereload.v @@ -21,7 +21,7 @@ const veb_livereload_server_start = time.ticks().str() // timestamp/ticks corresponding to when the veb server process was started @[if veb_livereload ?] fn (mut ctx Context) handle_veb_livereload_current() { - ctx.send_response_to_client('text/plain', veb.veb_livereload_server_start) + ctx.send_response_to_client('text/plain', veb_livereload_server_start) } // handle_veb_livereload_script serves a small dynamically generated .js file, @@ -42,7 +42,7 @@ function veb_livereload_checker_fn(started_at) { } }); } -const veb_livereload_checker = setInterval(veb_livereload_checker_fn, ${ctx.livereload_poll_interval_ms}, "${veb.veb_livereload_server_start}"); +const veb_livereload_checker = setInterval(veb_livereload_checker_fn, ${ctx.livereload_poll_interval_ms}, "${veb_livereload_server_start}"); ' ctx.send_response_to_client('text/javascript', res) } diff --git a/vlib/vweb/assets/assets.v b/vlib/vweb/assets/assets.v index 8202aed83db9c2..75c6f4cb04bce4 100644 --- a/vlib/vweb/assets/assets.v +++ b/vlib/vweb/assets/assets.v @@ -188,7 +188,7 @@ pub fn (mut am AssetManager) add(asset_type string, file string) bool { } else if asset_type == 'js' { am.js << asset } else { - panic('${assets.unknown_asset_type_error} (${asset_type}).') + panic('${unknown_asset_type_error} (${asset_type}).') } return true } @@ -205,7 +205,7 @@ fn (am AssetManager) exists(asset_type string, file string) bool { fn (am AssetManager) get_assets(asset_type string) []Asset { if asset_type != 'css' && asset_type != 'js' { - panic('${assets.unknown_asset_type_error} (${asset_type}).') + panic('${unknown_asset_type_error} (${asset_type}).') } assets := if asset_type == 'css' { am.css } else { am.js } return assets diff --git a/vlib/vweb/parse.v b/vlib/vweb/parse.v index 
05a13b40772034..20ff8a04f68fd8 100644 --- a/vlib/vweb/parse.v +++ b/vlib/vweb/parse.v @@ -75,11 +75,11 @@ fn parse_form_from_request(request http.Request) !(map[string]string, map[string } ct := request.header.get(.content_type) or { '' }.split(';').map(it.trim_left(' \t')) if 'multipart/form-data' in ct { - boundaries := ct.filter(it.starts_with(vweb.boundary_start)) + boundaries := ct.filter(it.starts_with(boundary_start)) if boundaries.len != 1 { return error('detected more that one form-data boundary') } - boundary := boundaries[0].all_after(vweb.boundary_start) + boundary := boundaries[0].all_after(boundary_start) if boundary.len > 0 && boundary[0] == `"` { // quotes are send by our http.post_multipart_form/2: return http.parse_multipart_form(request.data, boundary.trim('"')) diff --git a/vlib/vweb/tests/vweb_test_server/server.v b/vlib/vweb/tests/vweb_test_server/server.v index 28573318cabf66..887efc1813d26d 100644 --- a/vlib/vweb/tests/vweb_test_server/server.v +++ b/vlib/vweb/tests/vweb_test_server/server.v @@ -75,7 +75,7 @@ pub fn (mut app App) html_page() vweb.Result { @['/:user/settings'] pub fn (mut app App) settings(username string) vweb.Result { linfo('>>>>> ${@LOCATION}, username: ${username}') - if username !in vweb_test_server.known_users { + if username !in known_users { return app.not_found() } return app.html('username: ${username}') @@ -84,7 +84,7 @@ pub fn (mut app App) settings(username string) vweb.Result { @['/:user/:repo/settings'] pub fn (mut app App) user_repo_settings(username string, repository string) vweb.Result { linfo('>>>>> ${@LOCATION}, username: ${username}, repository: ${repository}') - if username !in vweb_test_server.known_users { + if username !in known_users { return app.not_found() } return app.html('username: ${username} | repository: ${repository}') diff --git a/vlib/vweb/vweb.v b/vlib/vweb/vweb.v index 0434344b8b1a9b..f440d2222aa4ab 100644 --- a/vlib/vweb/vweb.v +++ b/vlib/vweb/vweb.v @@ -243,7 +243,7 @@ pub fn (mut ctx 
Context) send_response_to_client(mimetype string, res string) bo http.CommonHeader.content_type: mimetype http.CommonHeader.content_length: resp.body.len.str() }).join(ctx.header) - resp.header = header.join(vweb.headers_close) + resp.header = header.join(headers_close) resp.set_version(.v1_1) resp.set_status(http.status_from_int(ctx.status.int())) @@ -291,7 +291,7 @@ pub fn (mut ctx Context) file(f_path string) Result { ctx.server_error(500) return Result{} } - content_type := vweb.mime_types[ext] + content_type := mime_types[ext] if content_type.len == 0 { eprintln('[vweb] no MIME type found for extension ${ext}') ctx.server_error(500) @@ -317,7 +317,7 @@ pub fn (mut ctx Context) server_error(ecode int) Result { if ctx.done { return Result{} } - send_string(mut ctx.conn, vweb.http_500.bytestr()) or {} + send_string(mut ctx.conn, http_500.bytestr()) or {} return Result{} } @@ -333,9 +333,9 @@ pub fn (mut ctx Context) redirect(url string, params RedirectParams) Result { return Result{} } ctx.done = true - mut resp := vweb.http_302 + mut resp := http_302 if params.status_code == 303 { - resp = vweb.http_303 + resp = http_303 } resp.header = resp.header.join(ctx.header) resp.header.add(.location, url) @@ -350,7 +350,7 @@ pub fn (mut ctx Context) not_found() Result { return Result{} } ctx.done = true - send_string(mut ctx.conn, vweb.http_404.bytestr()) or {} + send_string(mut ctx.conn, http_404.bytestr()) or {} return Result{} } @@ -718,7 +718,7 @@ fn handle_conn[T](mut conn net.TcpConn, global_app &T, controllers []&Controller // Form parse form, files := parse_form_from_request(req) or { // Bad request - conn.write(vweb.http_400.bytes()) or {} + conn.write(http_400.bytes()) or {} return } @@ -984,7 +984,7 @@ fn serve_if_static[T](mut app T, url urllib.URL, host string) bool { return false } data := os.read_file(static_file) or { - send_string(mut app.conn, vweb.http_404.bytestr()) or {} + send_string(mut app.conn, http_404.bytestr()) or {} return true } 
app.send_response_to_client(mime_type, data) @@ -1004,7 +1004,7 @@ fn (mut ctx Context) scan_static_directory(directory_path string, mount_path str ext := os.file_ext(file) // Rudimentary guard against adding files not in mime_types. // Use host_serve_static directly to add non-standard mime types. - if ext in vweb.mime_types { + if ext in mime_types { ctx.host_serve_static(host, mount_path.trim_right('/') + '/' + file, full_path) } @@ -1087,7 +1087,7 @@ pub fn (mut ctx Context) host_serve_static(host string, url string, file_path st ctx.static_files[url] = file_path // ctx.static_mime_types[url] = mime_type ext := os.file_ext(file_path) - ctx.static_mime_types[url] = vweb.mime_types[ext] + ctx.static_mime_types[url] = mime_types[ext] ctx.static_hosts[url] = host } diff --git a/vlib/vweb/vweb_livereload.v b/vlib/vweb/vweb_livereload.v index 5e0fcdca2d6531..9800ca34f479f0 100644 --- a/vlib/vweb/vweb_livereload.v +++ b/vlib/vweb/vweb_livereload.v @@ -21,7 +21,7 @@ const vweb_livereload_server_start = time.ticks().str() // timestamp/ticks corresponding to when the vweb server process was started @[if vweb_livereload ?] 
fn (mut ctx Context) handle_vweb_livereload_current() { - ctx.send_response_to_client('text/plain', vweb.vweb_livereload_server_start) + ctx.send_response_to_client('text/plain', vweb_livereload_server_start) } // handle_vweb_livereload_script serves a small dynamically generated .js file, @@ -42,7 +42,7 @@ function vweb_livereload_checker_fn(started_at) { } }); } -const vweb_livereload_checker = setInterval(vweb_livereload_checker_fn, ${ctx.livereload_poll_interval_ms}, "${vweb.vweb_livereload_server_start}"); +const vweb_livereload_checker = setInterval(vweb_livereload_checker_fn, ${ctx.livereload_poll_interval_ms}, "${vweb_livereload_server_start}"); ' ctx.send_response_to_client('text/javascript', res) } diff --git a/vlib/x/crypto/chacha20/chacha.v b/vlib/x/crypto/chacha20/chacha.v index 69633f8ad5336b..68f1bb3d58f69f 100644 --- a/vlib/x/crypto/chacha20/chacha.v +++ b/vlib/x/crypto/chacha20/chacha.v @@ -97,26 +97,25 @@ pub fn (mut c Cipher) xor_key_stream(mut dst []u8, src []u8) { // // Let's process for multiple blocks // number of blocks the src bytes should be split into - nr_blocks := src.len / chacha20.block_size + nr_blocks := src.len / block_size for i := 0; i < nr_blocks; i++ { // generate ciphers keystream block, stored in c.block c.generic_key_stream() // get current src block to be xor-ed - block := unsafe { src[i * chacha20.block_size..(i + 1) * chacha20.block_size] } + block := unsafe { src[i * block_size..(i + 1) * block_size] } // instead allocating output buffer for every block, we use dst buffer directly. 
// xor current block of plaintext with keystream in c.block - n := cipher.xor_bytes(mut dst[i * chacha20.block_size..(i + 1) * chacha20.block_size], - block, c.block) + n := cipher.xor_bytes(mut dst[i * block_size..(i + 1) * block_size], block, c.block) assert n == c.block.len } // process for partial block - if src.len % chacha20.block_size != 0 { + if src.len % block_size != 0 { c.generic_key_stream() // get the remaining last partial block - block := unsafe { src[nr_blocks * chacha20.block_size..] } + block := unsafe { src[nr_blocks * block_size..] } // xor block with keystream - _ := cipher.xor_bytes(mut dst[nr_blocks * chacha20.block_size..], block, c.block) + _ := cipher.xor_bytes(mut dst[nr_blocks * block_size..], block, c.block) } } @@ -180,11 +179,11 @@ pub fn (mut c Cipher) rekey(key []u8, nonce []u8) ! { @[direct_array_access] fn (mut c Cipher) do_rekey(key []u8, nonce []u8) ! { // check for correctness of key and nonce length - if key.len != chacha20.key_size { + if key.len != key_size { return error('chacha20: bad key size provided ') } // check for nonce's length is 12 or 24 - if nonce.len != chacha20.nonce_size && nonce.len != chacha20.x_nonce_size { + if nonce.len != nonce_size && nonce.len != x_nonce_size { return error('chacha20: bad nonce size provided') } mut nonces := nonce.clone() @@ -192,18 +191,18 @@ fn (mut c Cipher) do_rekey(key []u8, nonce []u8) ! { // if nonce's length is 24 bytes, we derive a new key and nonce with xchacha20 function // and supplied to setup process. - if nonces.len == chacha20.x_nonce_size { + if nonces.len == x_nonce_size { keys = xchacha20(keys, nonces[0..16])! 
- mut cnonce := []u8{len: chacha20.nonce_size} + mut cnonce := []u8{len: nonce_size} _ := copy(mut cnonce[4..12], nonces[16..24]) nonces = cnonce.clone() - } else if nonces.len != chacha20.nonce_size { + } else if nonces.len != nonce_size { return error('chacha20: wrong nonce size') } // bounds check elimination hint - _ = keys[chacha20.key_size - 1] - _ = nonces[chacha20.nonce_size - 1] + _ = keys[key_size - 1] + _ = nonces[nonce_size - 1] // setup ChaCha20 cipher key c.key[0] = binary.little_endian_u32(keys[0..4]) @@ -233,7 +232,7 @@ fn (mut c Cipher) chacha20_block() { // 12:bbbbbbbb 13:nnnnnnnn 14:nnnnnnnn 15:nnnnnnnn // // where c=constant k=key b=blockcounter n=nonce - c0, c1, c2, c3 := chacha20.cc0, chacha20.cc1, chacha20.cc2, chacha20.cc3 + c0, c1, c2, c3 := cc0, cc1, cc2, cc3 c4 := c.key[0] c5 := c.key[1] c6 := c.key[2] @@ -371,14 +370,14 @@ fn quarter_round(a u32, b u32, c u32, d u32) (u32, u32, u32, u32) { // encrypt_with_counter encrypts plaintext with internal counter set to ctr fn encrypt_with_counter(key []u8, nonce []u8, ctr u32, plaintext []u8) ![]u8 { - if key.len != chacha20.key_size { + if key.len != key_size { return error('bad key size') } - if nonce.len == chacha20.x_nonce_size { + if nonce.len == x_nonce_size { ciphertext := xchacha20_encrypt_with_counter(key, nonce, ctr, plaintext)! return ciphertext } - if nonce.len == chacha20.nonce_size { + if nonce.len == nonce_size { ciphertext := chacha20_encrypt_with_counter(key, nonce, ctr, plaintext)! return ciphertext } diff --git a/vlib/x/crypto/chacha20/chacha_test.v b/vlib/x/crypto/chacha20/chacha_test.v index 7b9212be409e09..318fdc10d8e545 100644 --- a/vlib/x/crypto/chacha20/chacha_test.v +++ b/vlib/x/crypto/chacha20/chacha_test.v @@ -48,7 +48,7 @@ struct BlockCase { } fn test_chacha20_no_overlap_xor_key_stream() ! { - for i, t in chacha20.xorkeystream_testcases { + for i, t in xorkeystream_testcases { key := hex.decode(t.key)! nonce := hex.decode(t.nonce)! mut cs := new_cipher(key, nonce)! 
@@ -67,7 +67,7 @@ fn test_chacha20_no_overlap_xor_key_stream() ! { } fn test_chacha20_block_function() ! { - for val in chacha20.blocks_testcases { + for val in blocks_testcases { key_bytes := hex.decode(val.key)! nonce_bytes := hex.decode(val.nonce)! mut cs := new_cipher(key_bytes, nonce_bytes)! @@ -107,7 +107,7 @@ fn test_chacha20_quarter_round() { // test poly1305 key generator as specified in https://datatracker.ietf.org/doc/html/rfc8439#section-2.6.2 fn test_chacha20_onetime_poly1305_key_generation() ! { - for i, v in chacha20.otk_cases { + for i, v in otk_cases { key := hex.decode(v.key)! nonce := hex.decode(v.nonce)! @@ -208,7 +208,7 @@ struct EncryptionCase { } fn test_chacha20_cipher_encrypt() ! { - for c in chacha20.encryption_test_cases { + for c in encryption_test_cases { key_bytes := hex.decode(c.key)! nonce_bytes := hex.decode(c.nonce)! plaintext_bytes := hex.decode(c.plaintext)! @@ -225,7 +225,7 @@ fn test_chacha20_cipher_encrypt() ! { } fn test_chacha20_cipher_decrypt() ! { - for c in chacha20.encryption_test_cases { + for c in encryption_test_cases { key_bytes := hex.decode(c.key)! nonce_bytes := hex.decode(c.nonce)! diff --git a/vlib/x/crypto/chacha20/xchacha.v b/vlib/x/crypto/chacha20/xchacha.v index 496e1a97d68368..785ac93bb37a34 100644 --- a/vlib/x/crypto/chacha20/xchacha.v +++ b/vlib/x/crypto/chacha20/xchacha.v @@ -18,7 +18,7 @@ fn xchacha20(key []u8, nonce []u8) ![]u8 { if key.len != key_size { return error('xchacha: Bad key size') } - if nonce.len != chacha20.h_nonce_size { + if nonce.len != h_nonce_size { return error('xchacha: Bad nonce size') } // initializes ChaCha20 state diff --git a/vlib/x/crypto/chacha20/xchacha_test.v b/vlib/x/crypto/chacha20/xchacha_test.v index fab05761747458..52d40bd6d20c5c 100644 --- a/vlib/x/crypto/chacha20/xchacha_test.v +++ b/vlib/x/crypto/chacha20/xchacha_test.v @@ -74,7 +74,7 @@ struct XChachaTestCases { } fn test_xchacha20_encrypt_vector_test() ! 
{ - for c in chacha20.xchacha_vector_test { + for c in xchacha_vector_test { plaintext_bytes := hex.decode(c.input)! key_bytes := hex.decode(c.key)! // assert key_bytes.len == 32 diff --git a/vlib/x/crypto/chacha20poly1305/chacha20poly1305.v b/vlib/x/crypto/chacha20poly1305/chacha20poly1305.v index 235cac679b66d1..b056292a9a8cb7 100644 --- a/vlib/x/crypto/chacha20poly1305/chacha20poly1305.v +++ b/vlib/x/crypto/chacha20poly1305/chacha20poly1305.v @@ -67,17 +67,17 @@ pub fn decrypt(ciphertext []u8, key []u8, nonce []u8, ad []u8) ![]u8 { // Chacha20Poly1305 represents AEAD algorithm backed by `x.crypto.chacha20` and `x.crypto.poly1305`. struct Chacha20Poly1305 { - key []u8 = []u8{len: chacha20poly1305.key_size} - ncsize int = chacha20poly1305.nonce_size + key []u8 = []u8{len: key_size} + ncsize int = nonce_size } // new creates a new Chacha20Poly1305 AEAD instance with given 32 bytes of key // and the nonce size in ncsize. The ncsize should be 12 or 24 length, otherwise it would return error. pub fn new(key []u8, ncsize int) !&AEAD { - if key.len != chacha20poly1305.key_size { + if key.len != key_size { return error('chacha20poly1305: bad key size') } - if ncsize != chacha20poly1305.nonce_size && ncsize != chacha20poly1305.x_nonce_size { + if ncsize != nonce_size && ncsize != x_nonce_size { return error('chacha20poly1305: bad nonce size supplied, its should 12 or 24') } c := &Chacha20Poly1305{ @@ -95,7 +95,7 @@ pub fn (c Chacha20Poly1305) nonce_size() int { // overhead returns maximum difference between the lengths of a plaintext to be encrypted and // ciphertext's output. In the context of Chacha20Poly1305, `.overhead() == .tag_size`. pub fn (c Chacha20Poly1305) overhead() int { - return chacha20poly1305.tag_size + return tag_size } // encrypt encrypts plaintext, along with nonce and additional data and generates @@ -125,7 +125,7 @@ fn (c Chacha20Poly1305) encrypt_generic(plaintext []u8, nonce []u8, ad []u8) ![] // and given nonce. 
Actually its generates by performing ChaCha20 key stream function, // and take the first 32 bytes as a one-time key for Poly1305 from 64 bytes results. // see https://datatracker.ietf.org/doc/html/rfc8439#section-2.6 - mut polykey := []u8{len: chacha20poly1305.key_size} + mut polykey := []u8{len: key_size} mut s := chacha20.new_cipher(c.key, nonce)! s.xor_key_stream(mut polykey, polykey) @@ -143,7 +143,7 @@ fn (c Chacha20Poly1305) encrypt_generic(plaintext []u8, nonce []u8, ad []u8) ![] // Lets creates Poly1305 instance with one-time key generates in above step, // updates Poly1305 state with this constructed_msg and finally generates tag. - mut tag := []u8{len: chacha20poly1305.tag_size} + mut tag := []u8{len: tag_size} mut po := poly1305.new(polykey)! po.update(constructed_msg) po.finish(mut tag) @@ -175,7 +175,7 @@ pub fn (c Chacha20Poly1305) decrypt(ciphertext []u8, nonce []u8, ad []u8) ![]u8 fn (c Chacha20Poly1305) decrypt_generic(ciphertext []u8, nonce []u8, ad []u8) ![]u8 { // generates poly1305 one-time key for later calculation - mut polykey := []u8{len: chacha20poly1305.key_size} + mut polykey := []u8{len: key_size} mut s := chacha20.new_cipher(c.key, nonce)! s.xor_key_stream(mut polykey, polykey) @@ -192,7 +192,7 @@ fn (c Chacha20Poly1305) decrypt_generic(ciphertext []u8, nonce []u8, ad []u8) ![ mut constructed_msg := []u8{} poly1305_construct_msg(mut constructed_msg, ad, encrypted) - mut tag := []u8{len: chacha20poly1305.tag_size} + mut tag := []u8{len: tag_size} mut po := poly1305.new(polykey)! po.update(constructed_msg) po.finish(mut tag) diff --git a/vlib/x/crypto/chacha20poly1305/chacha20poly1305_test.v b/vlib/x/crypto/chacha20poly1305/chacha20poly1305_test.v index ee00232e49704c..b71173c456fdd7 100644 --- a/vlib/x/crypto/chacha20poly1305/chacha20poly1305_test.v +++ b/vlib/x/crypto/chacha20poly1305/chacha20poly1305_test.v @@ -75,7 +75,7 @@ fn test_aead_decrypt_vector_test_51() ! { } fn test_decrypt_and_verify_tag() ! 
{ - for i, c in chacha20poly1305.aead_cases { + for i, c in aead_cases { key := hex.decode(c.key)! aad := hex.decode(c.aad)! nonce := hex.decode(c.nonce)! @@ -167,7 +167,7 @@ struct ChapolyTest { // this test data come from golang vector tests of the same module fn test_cha20poly1305_vector_data() ! { - for i, t in chacha20poly1305.chapoly_testcases { + for i, t in chapoly_testcases { plaintext := hex.decode(t.plaintext)! aad := hex.decode(t.aad)! key := hex.decode(t.key)! diff --git a/vlib/x/crypto/poly1305/poly1305.v b/vlib/x/crypto/poly1305/poly1305.v index be84b0a5ceb7f1..79d65ca0971b9e 100644 --- a/vlib/x/crypto/poly1305/poly1305.v +++ b/vlib/x/crypto/poly1305/poly1305.v @@ -52,7 +52,7 @@ mut: // Poly1305 accumulator h Uint192 // buffer - buffer []u8 = []u8{len: poly1305.block_size} + buffer []u8 = []u8{len: block_size} leftover int // The done flag tells us if the instance should not be used again. // It's set to true after calling finish or reset on the instance. @@ -64,7 +64,7 @@ mut: // This is an oneshot function to create a tag and reset internal state after the call. // For incremental updates, use the method based on Poly1305 mac instance. pub fn create_tag(mut out []u8, msg []u8, key []u8) ! { - if out.len != poly1305.tag_size { + if out.len != tag_size { return error('poly1305: bad out tag_size') } mut po := new(key)! @@ -77,7 +77,7 @@ pub fn create_tag(mut out []u8, msg []u8, key []u8) ! { // It returns `true` if two tags is matching, `false` otherwise. pub fn verify_tag(tag []u8, msg []u8, key []u8) bool { mut po := new(key) or { panic(err) } - mut out := []u8{len: poly1305.tag_size} + mut out := []u8{len: tag_size} po.update(msg) po.finish(mut out) return subtle.constant_time_compare(tag, out) == 1 @@ -86,7 +86,7 @@ pub fn verify_tag(tag []u8, msg []u8, key []u8) bool { // new creates a new Poly1305 mac instance from 32 bytes of key provided. 
@[direct_array_access] pub fn new(key []u8) !&Poly1305 { - if key.len != poly1305.key_size { + if key.len != key_size { return error('poly1305: bad key length') } // Read the r part of the key and clamp it. Clamping was done by clearing @@ -99,8 +99,8 @@ pub fn new(key []u8) !&Poly1305 { // mask value is 0x0ffffffc0ffffffc0ffffffc0fffffff. // See the rmask0 and rmask1 constants above. r := unsigned.Uint128{ - lo: binary.little_endian_u64(key[0..8]) & poly1305.rmask0 - hi: binary.little_endian_u64(key[8..16]) & poly1305.rmask1 + lo: binary.little_endian_u64(key[0..8]) & rmask0 + hi: binary.little_endian_u64(key[8..16]) & rmask1 } // read s part from the rest bytes of key @@ -124,10 +124,10 @@ pub fn (mut po Poly1305) update(msg []u8) { // verify verifies if the `tag` is a valid message authenticated code for current state of // Poly1305 instance. Internally, it works on clone of the current instance. pub fn (po Poly1305) verify(tag []u8) bool { - assert tag.len == poly1305.tag_size + assert tag.len == tag_size // we work on copy of current instance mut ctx := po - mut out := []u8{len: poly1305.tag_size} + mut out := []u8{len: tag_size} if ctx.leftover > 0 { poly1305_blocks(mut ctx, ctx.buffer[..ctx.leftover]) } @@ -153,14 +153,14 @@ pub fn (mut po Poly1305) finish(mut out []u8) { // reinit reinitializes Poly1305 mac instance by resetting internal fields, and // then reinit instance with the new key. 
pub fn (mut po Poly1305) reinit(key []u8) { - if key.len != poly1305.key_size { + if key.len != key_size { panic('bad key size') } // first, we reset the instance and than setup its again po.reset() po.r = unsigned.Uint128{ - lo: binary.little_endian_u64(key[0..8]) & poly1305.rmask0 - hi: binary.little_endian_u64(key[8..16]) & poly1305.rmask1 + lo: binary.little_endian_u64(key[0..8]) & rmask0 + hi: binary.little_endian_u64(key[8..16]) & rmask1 } po.s = unsigned.Uint128{ lo: binary.little_endian_u64(key[16..24]) @@ -183,7 +183,7 @@ fn poly1305_update_block(mut po Poly1305, msg []u8) { mut idx := 0 // handle leftover if po.leftover > 0 { - want := math.min(poly1305.block_size - po.leftover, msglen) + want := math.min(block_size - po.leftover, msglen) block := msg[idx..idx + want] _ := copy(mut po.buffer[po.leftover..], block) @@ -191,15 +191,15 @@ fn poly1305_update_block(mut po Poly1305, msg []u8) { idx += want po.leftover += want - if po.leftover < poly1305.block_size { + if po.leftover < block_size { return } poly1305_blocks(mut po, po.buffer) po.leftover = 0 } // process full blocks - if msglen >= poly1305.block_size { - want := (msglen & ~(poly1305.block_size - 1)) + if msglen >= block_size { + want := (msglen & ~(block_size - 1)) mut block := unsafe { msg[idx..idx + want] } poly1305_blocks(mut po, block) idx += want @@ -233,13 +233,13 @@ fn poly1305_blocks(mut po Poly1305, msg []u8) { return } // For correctness and clarity, we check whether r is properly clamped. - if po.r.lo & poly1305.not_rmask0 != 0 && po.r.hi & poly1305.not_rmask1 != 0 { + if po.r.lo & not_rmask0 != 0 && po.r.hi & not_rmask1 != 0 { panic('poly1305: bad unclamped of r') } // We need the accumulator to be in correctly reduced form to make sure it is not overflowing. // To be safe when used, only maximum of four low bits of the high part of the accumulator (h.hi) // can be set, and the remaining high bits must not be set. 
- if po.h.hi & poly1305.mask_high60bits != 0 { + if po.h.hi & mask_high60bits != 0 { panic('poly1305: h need to be reduced') } @@ -259,10 +259,10 @@ fn poly1305_blocks(mut po Poly1305, msg []u8) { for msglen > 0 { // carry mut c := u64(0) - if msglen >= poly1305.block_size { + if msglen >= block_size { // Read the 16 bytes msg block as a little-endian number // and stored into the 128 bits of Uint128 - block := msg[idx..idx + poly1305.block_size] + block := msg[idx..idx + block_size] m := unsigned.Uint128{ lo: binary.little_endian_u64(block[0..8]) hi: binary.little_endian_u64(block[8..16]) @@ -276,12 +276,12 @@ fn poly1305_blocks(mut po Poly1305, msg []u8) { // so we can just add 1 to the high part of accumulator (h.hi += 1) // h.hi has been checked above, so, its safe to assume its not overflow h.hi += c + 1 - idx += poly1305.block_size - msglen -= poly1305.block_size + idx += block_size + msglen -= block_size } else { // The last one msg block might be shorter than 16 bytes long, // pad it with zeros to align with block_size. - mut buf := []u8{len: poly1305.block_size} + mut buf := []u8{len: block_size} subtle.constant_time_copy(1, mut buf[..msglen], msg[idx..idx + msglen]) // set a bit above msg size. @@ -295,8 +295,8 @@ fn poly1305_blocks(mut po Poly1305, msg []u8) { // add this number to the accumulator, ie, h += m h, c = h.add_128_checked(m, 0) h.hi += c - idx += poly1305.block_size - msglen -= poly1305.block_size + idx += block_size + msglen -= block_size } // perform h *= r and then do partial reduction modulo p to the output. @@ -311,11 +311,11 @@ fn poly1305_blocks(mut po Poly1305, msg []u8) { // finalize finalizes the reduction of accumulator h, adds it with secret s, // and then take 128 bits of h stored into out. 
fn finalize(mut out []u8, mut ac Uint192, s unsigned.Uint128) { - assert out.len == poly1305.tag_size + assert out.len == tag_size mut h := ac // compute t = h - p = h - (2¹³⁰ - 5), and select h as the result if the // subtraction underflows, and t otherwise. - t, b := h.sub_checked(poly1305.p) + t, b := h.sub_checked(p) // h = h if h < p else h - p h.lo = select_64(b, h.lo, t.lo) @@ -358,9 +358,9 @@ fn poly1305_squeeze(mut h Uint192, t [4]u64) { mut ac := Uint192{ lo: t[0] mi: t[1] - hi: t[2] & poly1305.mask_low2bits + hi: t[2] & mask_low2bits } - mut cc := unsigned.uint128_new(t[2] & poly1305.mask_high62bits, t[3]) + mut cc := unsigned.uint128_new(t[2] & mask_high62bits, t[3]) // reduction of general mersene prime, x = c * 2¹³⁰ + h = c * 5 + h (mod 2¹³⁰ - 5) // because 2¹³⁰ = 5 (mod 2¹³⁰ - 5) // here, we follow the go version diff --git a/vlib/x/crypto/poly1305/poly1305_test.v b/vlib/x/crypto/poly1305/poly1305_test.v index f0aa62ca4fd3f1..1c2a2e5beb4e6e 100644 --- a/vlib/x/crypto/poly1305/poly1305_test.v +++ b/vlib/x/crypto/poly1305/poly1305_test.v @@ -131,7 +131,7 @@ fn test_poly1305_with_smoked_messages_are_working_normally() ! { // This is a test case from RFC 8439 vector test data. // There are 12 cases provided. fn test_poly1305_core_rfc_vector_tests() ! { - for i, c in poly1305.rfc_test_cases { + for i, c in rfc_test_cases { mut key := hex.decode(c.key) or { panic(err.msg()) } msg := hex.decode(c.msg) or { panic(err.msg()) } expected_tag := hex.decode(c.tag) or { panic(err.msg()) } @@ -158,7 +158,7 @@ fn test_poly1305_core_rfc_vector_tests() ! { } fn test_poly1305_function_based_core_functionality() ! { - for i, c in poly1305.rfc_test_cases { + for i, c in rfc_test_cases { mut key := hex.decode(c.key) or { panic(err.msg()) } mut msg := hex.decode(c.msg) or { panic(err.msg()) } @@ -178,7 +178,7 @@ fn test_poly1305_function_based_core_functionality() ! 
{ // its comes from golang poly1305 vector test, except minus with changed internal state test fn test_poly1305_smoked_data_vectors() ! { - for i, c in poly1305.testdata { + for i, c in testdata { mut key := hex.decode(c.key)! mut msg := hex.decode(c.msg)! expected_tag := hex.decode(c.tag)! diff --git a/vlib/x/crypto/sm4/sm4.v b/vlib/x/crypto/sm4/sm4.v index cc78a7346a603a..8789817882c03d 100644 --- a/vlib/x/crypto/sm4/sm4.v +++ b/vlib/x/crypto/sm4/sm4.v @@ -262,7 +262,7 @@ fn sm4_tl(ka u32) u32 { // Divide ka into 4 bytes, then use sbox transfer, combine again into a new u32 big_endian_put_u32_fixed(mut a, ka) - b[3], b[2], b[1], b[0] = sm4.sbox[a[3]], sm4.sbox[a[2]], sm4.sbox[a[1]], sm4.sbox[a[0]] + b[3], b[2], b[1], b[0] = sbox[a[3]], sbox[a[2]], sbox[a[1]], sbox[a[0]] bb := big_endian_u32_fixed(b) // Rotate Left Shift 02, 10, 18, 24 @@ -283,7 +283,7 @@ fn calculate_irk(ka u32) u32 { // Divide ka into 4 bytes, then use sbox transfer, combine them into a new u32 big_endian_put_u32_fixed(mut a, ka) - b[3], b[2], b[1], b[0] = sm4.sbox[a[3]], sm4.sbox[a[2]], sm4.sbox[a[1]], sm4.sbox[a[0]] + b[3], b[2], b[1], b[0] = sbox[a[3]], sbox[a[2]], sbox[a[1]], sbox[a[0]] bb := big_endian_u32_fixed(b) // Rotate Left Shift 13, 23 @@ -300,12 +300,12 @@ fn key_expansion(mut rk [32]u32, key [16]u8) { // k = key ^ fk big_endian_u128_fixed(key, mut mk) - k[3], k[2], k[1], k[0] = mk[3] ^ sm4.fk[3], mk[2] ^ sm4.fk[2], mk[1] ^ sm4.fk[1], mk[0] ^ sm4.fk[0] + k[3], k[2], k[1], k[0] = mk[3] ^ fk[3], mk[2] ^ fk[2], mk[1] ^ fk[1], mk[0] ^ fk[0] // generate the round keys for i in 0 .. 
32 { // TODO: use 8:32 lookup table speedup - k[i + 4] = k[i] ^ calculate_irk(k[i + 1] ^ k[i + 2] ^ k[i + 3] ^ sm4.ck[i]) + k[i + 4] = k[i] ^ calculate_irk(k[i + 1] ^ k[i + 2] ^ k[i + 3] ^ ck[i]) rk[i] = k[i + 4] } } @@ -325,7 +325,7 @@ fn one_round(rk [32]u32, input [16]u8, mut output [16]u8) { tmp1 = x[0] ^ sm4_tl(tmp) } $else { // use 8:32 lookup table - tmp1 = x[0] ^ sm4.table0[(tmp >> 24) & 0xff] ^ sm4.table1[(tmp >> 16) & 0xff] ^ sm4.table2[(tmp >> 8) & 0xff] ^ sm4.table3[(tmp >> 0) & 0xff] + tmp1 = x[0] ^ table0[(tmp >> 24) & 0xff] ^ table1[(tmp >> 16) & 0xff] ^ table2[(tmp >> 8) & 0xff] ^ table3[(tmp >> 0) & 0xff] } x[3], x[2], x[1], x[0] = tmp1, x[3], x[2], x[1] } diff --git a/vlib/x/json2/count_test.v b/vlib/x/json2/count_test.v index 36f9b28e5e3403..1acb7e2894d370 100644 --- a/vlib/x/json2/count_test.v +++ b/vlib/x/json2/count_test.v @@ -82,7 +82,7 @@ fn test_types() { count_test(StructType[time.Time]{}) - count_test(StructType[time.Time]{ val: json2.fixed_time }) + count_test(StructType[time.Time]{ val: fixed_time }) count_test(StructType[StructType[int]]{ val: StructType[int]{ diff --git a/vlib/x/json2/encoder.v b/vlib/x/json2/encoder.v index d99d49a874dcd9..179f7f655604ac 100644 --- a/vlib/x/json2/encoder.v +++ b/vlib/x/json2/encoder.v @@ -117,13 +117,13 @@ fn (e &Encoder) encode_newline(level int, mut buf []u8) ! { } fn (e &Encoder) encode_map[T](value T, level int, mut buf []u8) ! { - buf << json2.curly_open_rune + buf << curly_open_rune mut idx := 0 for k, v in value { e.encode_newline(level, mut buf)! // e.encode_string(k.str(), mut buf)! e.encode_string(k, mut buf)! - buf << json2.colon_rune + buf << colon_rune if e.newline != 0 { buf << ` ` } @@ -140,13 +140,13 @@ fn (e &Encoder) encode_map[T](value T, level int, mut buf []u8) ! { } if idx < value.len - 1 { - buf << json2.comma_rune + buf << comma_rune } idx++ } // e.encode_newline(level, mut buf)! e.encode_newline(level - 1, mut buf)! 
- buf << json2.curly_close_rune + buf << curly_close_rune } fn (e &Encoder) encode_value_with_level[T](val T, level int, mut buf []u8) ! { @@ -167,9 +167,9 @@ fn (e &Encoder) encode_value_with_level[T](val T, level int, mut buf []u8) ! { // TODO } $else $if T is time.Time { str_value := val.format_rfc3339() - buf << json2.quote_rune + buf << quote_rune unsafe { buf.push_many(str_value.str, str_value.len) } - buf << json2.quote_rune + buf << quote_rune } $else $if T is $map { e.encode_map(val, level, mut buf)! } $else $if T is $array { @@ -178,7 +178,7 @@ fn (e &Encoder) encode_value_with_level[T](val T, level int, mut buf []u8) ! { str_value := val.json_str() unsafe { buf.push_many(str_value.str, str_value.len) } } $else $if T is Null { - unsafe { buf.push_many(json2.null_in_bytes.str, json2.null_in_bytes.len) } + unsafe { buf.push_many(null_in_bytes.str, null_in_bytes.len) } } $else $if T is $struct { e.encode_struct(val, level, mut buf)! } $else $if T is $enum { @@ -193,7 +193,7 @@ fn (e &Encoder) encode_value_with_level[T](val T, level int, mut buf []u8) ! { } fn (e &Encoder) encode_struct[U](val U, level int, mut buf []u8) ! { - buf << json2.curly_open_rune + buf << curly_open_rune mut i := 0 mut fields_len := 0 @@ -252,7 +252,7 @@ fn (e &Encoder) encode_struct[U](val U, level int, mut buf []u8) ! { } else { e.encode_string(field.name, mut buf)! } - buf << json2.colon_rune + buf << colon_rune if e.newline != 0 { buf << ` ` @@ -270,7 +270,7 @@ fn (e &Encoder) encode_struct[U](val U, level int, mut buf []u8) ! { } else { e.encode_string(field.name, mut buf)! } - buf << json2.colon_rune + buf << colon_rune if e.newline != 0 { buf << ` ` @@ -296,14 +296,14 @@ fn (e &Encoder) encode_struct[U](val U, level int, mut buf []u8) ! { e.encode_string(val.$(field.name).str(), mut buf)! 
} $else $if field.typ is time.Time { str_value := val.$(field.name).format_rfc3339() - buf << json2.quote_rune + buf << quote_rune unsafe { buf.push_many(str_value.str, str_value.len) } - buf << json2.quote_rune + buf << quote_rune } $else $if field.typ is bool { if value { - unsafe { buf.push_many(json2.true_in_string.str, json2.true_in_string.len) } + unsafe { buf.push_many(true_in_string.str, true_in_string.len) } } else { - unsafe { buf.push_many(json2.false_in_string.str, json2.false_in_string.len) } + unsafe { buf.push_many(false_in_string.str, false_in_string.len) } } } $else $if field.typ in [$float, $int] { str_value := val.$(field.name).str() @@ -340,9 +340,9 @@ fn (e &Encoder) encode_struct[U](val U, level int, mut buf []u8) ! { e.encode_string(parsed_time.format_rfc3339(), mut buf)! } $else $if field.unaliased_typ is bool { if val.$(field.name) { - unsafe { buf.push_many(json2.true_in_string.str, json2.true_in_string.len) } + unsafe { buf.push_many(true_in_string.str, true_in_string.len) } } else { - unsafe { buf.push_many(json2.false_in_string.str, json2.false_in_string.len) } + unsafe { buf.push_many(false_in_string.str, false_in_string.len) } } } $else $if field.unaliased_typ in [$float, $int] { str_value := val.$(field.name).str() @@ -366,7 +366,7 @@ fn (e &Encoder) encode_struct[U](val U, level int, mut buf []u8) ! { if i < fields_len - 1 && !ignore_field { if !is_nil { - buf << json2.comma_rune + buf << comma_rune } } if !ignore_field { @@ -374,13 +374,13 @@ fn (e &Encoder) encode_struct[U](val U, level int, mut buf []u8) ! { } } e.encode_newline(level - 1, mut buf)! - buf << json2.curly_close_rune + buf << curly_close_rune // b.measure('encode_struct') } fn (e &Encoder) encode_array[U](val []U, level int, mut buf []u8) ! 
{ if val.len == 0 { - unsafe { buf.push_many(&json2.empty_array[0], json2.empty_array.len) } + unsafe { buf.push_many(&empty_array[0], empty_array.len) } return } buf << `[` @@ -402,7 +402,7 @@ fn (e &Encoder) encode_array[U](val []U, level int, mut buf []u8) ! { return error('type ${typeof(val).name} cannot be array encoded') } if i < val.len - 1 { - buf << json2.comma_rune + buf << comma_rune } } @@ -455,17 +455,17 @@ pub fn (f Any) prettify_json_str() string { @[direct_array_access] fn (e &Encoder) encode_string(s string, mut buf []u8) ! { if s == '' { - empty := [u8(json2.quote_rune), json2.quote_rune]! + empty := [u8(quote_rune), quote_rune]! unsafe { buf.push_many(&empty[0], 2) } return } mut last_no_buffer_expansible_char_position_candidate := 0 - buf << json2.quote_rune + buf << quote_rune if !e.escape_unicode { unsafe { buf.push_many(s.str, s.len) - buf << json2.quote_rune + buf << quote_rune } return } @@ -477,8 +477,7 @@ fn (e &Encoder) encode_string(s string, mut buf []u8) ! { current_value_cause_buffer_expansion := (current_utf8_len == 1 && ((current_byte < 32 || current_byte > 127) - || current_byte in json2.ascii_especial_characters)) - || current_utf8_len == 3 + || current_byte in ascii_especial_characters)) || current_utf8_len == 3 if !current_value_cause_buffer_expansion { // while it is not the last one @@ -521,21 +520,21 @@ fn (e &Encoder) encode_string(s string, mut buf []u8) ! 
{ if current_byte < 32 { // ASCII Control Characters unsafe { - buf.push_many(json2.ascii_control_characters[current_byte].str, json2.ascii_control_characters[current_byte].len) + buf.push_many(ascii_control_characters[current_byte].str, ascii_control_characters[current_byte].len) } last_no_buffer_expansible_char_position_candidate = idx + 1 } else if current_byte >= 32 && current_byte < 128 { // ASCII especial characters if current_byte == `\\` { - unsafe { buf.push_many(&json2.back_slash[0], json2.back_slash.len) } + unsafe { buf.push_many(&back_slash[0], back_slash.len) } last_no_buffer_expansible_char_position_candidate = idx + 1 continue } else if current_byte == `"` { - unsafe { buf.push_many(&json2.quote[0], json2.quote.len) } + unsafe { buf.push_many("e[0], quote.len) } last_no_buffer_expansible_char_position_candidate = idx + 1 continue } else if current_byte == `/` { - unsafe { buf.push_many(&json2.slash[0], json2.slash.len) } + unsafe { buf.push_many(&slash[0], slash.len) } last_no_buffer_expansible_char_position_candidate = idx + 1 continue } @@ -563,7 +562,7 @@ fn (e &Encoder) encode_string(s string, mut buf []u8) ! { codepoint = u32((codepoint << 6) | (b & 0x3F)) } // runes like: ✔, ひらがな ... - unsafe { buf.push_many(&json2.null_unicode[0], json2.null_unicode.len) } + unsafe { buf.push_many(&null_unicode[0], null_unicode.len) } buf[buf.len - 1] = hex_digit(codepoint & 0xF) buf[buf.len - 2] = hex_digit((codepoint >> 4) & 0xF) buf[buf.len - 3] = hex_digit((codepoint >> 8) & 0xF) @@ -573,7 +572,7 @@ fn (e &Encoder) encode_string(s string, mut buf []u8) ! 
{ } } - buf << json2.quote_rune + buf << quote_rune } fn hex_digit(n int) u8 { diff --git a/vlib/x/json2/scanner.v b/vlib/x/json2/scanner.v index 6c656b10d7d380..90bb6709126f39 100644 --- a/vlib/x/json2/scanner.v +++ b/vlib/x/json2/scanner.v @@ -79,13 +79,13 @@ fn (mut s Scanner) move_pos_with_newlines() { fn (mut s Scanner) move_pos(include_space bool, include_newlines bool) { s.pos++ if s.pos < s.text.len { - if include_newlines && s.text[s.pos] in json2.newlines { + if include_newlines && s.text[s.pos] in newlines { s.line++ s.col = 0 if s.text[s.pos] == `\r` && s.pos + 1 < s.text.len && s.text[s.pos + 1] == `\n` { s.pos++ } - for s.pos < s.text.len && s.text[s.pos] in json2.newlines { + for s.pos < s.text.len && s.text[s.pos] in newlines { s.move() } } else if include_space && s.text[s.pos] == ` ` { @@ -130,15 +130,14 @@ fn (mut s Scanner) text_scan() Token { if (s.pos - 1 >= 0 && s.text[s.pos - 1] != `\\`) && ch == `"` { has_closed = true break - } else if (s.pos - 1 >= 0 && s.text[s.pos - 1] != `\\`) - && ch in json2.important_escapable_chars { + } else if (s.pos - 1 >= 0 && s.text[s.pos - 1] != `\\`) && ch in important_escapable_chars { return s.error('character must be escaped with a backslash') } else if (s.pos == s.text.len - 1 && ch == `\\`) || ch == u8(0) { return s.error('invalid backslash escape') } else if s.pos + 1 < s.text.len && ch == `\\` { peek := s.text[s.pos + 1] - if peek in json2.valid_unicode_escapes { - chrs << json2.unicode_transform_escapes[int(peek)] + if peek in valid_unicode_escapes { + chrs << unicode_transform_escapes[int(peek)] s.pos++ s.col++ continue @@ -224,7 +223,7 @@ fn (mut s Scanner) num_scan() Token { if s.pos < s.text.len && (s.text[s.pos] == `e` || s.text[s.pos] == `E`) { digits << s.text[s.pos] s.move_pos_with_newlines() - if s.pos < s.text.len && s.text[s.pos] in json2.exp_signs { + if s.pos < s.text.len && s.text[s.pos] in exp_signs { digits << s.text[s.pos] s.move_pos_with_newlines() } @@ -257,7 +256,7 @@ fn (s 
Scanner) invalid_token() Token { // used to set the next token @[manualfree] fn (mut s Scanner) scan() Token { - if s.pos < s.text.len && (s.text[s.pos] == ` ` || s.text[s.pos] in json2.newlines) { + if s.pos < s.text.len && (s.text[s.pos] == ` ` || s.text[s.pos] in newlines) { s.move() } if s.pos >= s.text.len { @@ -295,7 +294,7 @@ fn (mut s Scanner) scan() Token { } unsafe { ident.free() } return s.invalid_token() - } else if s.text[s.pos] in json2.char_list { + } else if s.text[s.pos] in char_list { chr := s.text[s.pos] tok := s.tokenize([]u8{}, unsafe { TokenKind(int(chr)) }) s.move() diff --git a/vlib/x/sessions/sessions.v b/vlib/x/sessions/sessions.v index 4954833af61b73..c356eac7d1f958 100644 --- a/vlib/x/sessions/sessions.v +++ b/vlib/x/sessions/sessions.v @@ -12,7 +12,7 @@ const session_id_length = 32 // new_session_id creates and returns a random session id and its signed version. // You can directly use the signed version as a cookie value pub fn new_session_id(secret []u8) (string, string) { - sid := rand.hex(sessions.session_id_length) + sid := rand.hex(session_id_length) hashed := hmac.new(secret, sid.bytes(), sha256.sum, sha256.block_size) diff --git a/vlib/x/templating/dtm/dynamic_template_manager.v b/vlib/x/templating/dtm/dynamic_template_manager.v index 77a89e98d7e63c..891e8f3371404b 100644 --- a/vlib/x/templating/dtm/dynamic_template_manager.v +++ b/vlib/x/templating/dtm/dynamic_template_manager.v @@ -71,7 +71,7 @@ mut: template_caches shared []TemplateCache = []TemplateCache{} // counter for each individual TemplateCache created/updated id_counter int = 1 - ch_cache_handler chan TemplateCache = chan TemplateCache{cap: dtm.cache_handler_channel_cap} + ch_cache_handler chan TemplateCache = chan TemplateCache{cap: cache_handler_channel_cap} // 'id_to_handlered' field is used exclusively by the cache handler to update or delete specific 'TemplateCache' in the cache database. 
id_to_handlered int close_cache_handler bool @@ -79,7 +79,7 @@ mut: compress_html bool = true active_cache_server bool = true // Initialisation of max data size in memory storage - max_size_data_in_memory int = dtm.max_size_data_in_memory + max_size_data_in_memory int = max_size_data_in_memory // This array is designed to store a control process that checks whether cached data is currently in use while simultaneously handling expiration. // This allows for the harmonious management of both aspects and facilitates the necessary actions. nbr_of_remaining_template_request shared []RemainingTemplateRequest = []RemainingTemplateRequest{} @@ -161,7 +161,7 @@ struct HtmlFileInfo { pub struct TemplateCacheParams { pub: placeholders &map[string]DtmMultiTypeMap = &map[string]DtmMultiTypeMap{} - cache_delay_expiration i64 = dtm.cache_delay_expiration_by_default + cache_delay_expiration i64 = cache_delay_expiration_by_default } // DynamicTemplateManagerInitialisationParams is used with 'initialize' function. (See below at initialize section) @@ -171,7 +171,7 @@ pub: def_cache_path string compress_html bool = true active_cache_server bool = true - max_size_data_in_mem int = dtm.max_size_data_in_memory + max_size_data_in_mem int = max_size_data_in_memory test_cache_dir string test_template_dir string } @@ -230,7 +230,7 @@ pub fn initialize(dtm_init_params DynamicTemplateManagerInitialisationParams) &D } // If it is impossible to use a cache directory, the cache system is deactivated, and the user is warned." if !active_cache_handler { - eprintln('${dtm.message_signature_warn} The cache storage directory does not exist or has a problem and it was also not possible to use a folder suitable for temporary storage. Therefore, the cache system will be disabled. 
It is recommended to address the aforementioned issues to utilize the cache system.') + eprintln('${message_signature_warn} The cache storage directory does not exist or has a problem and it was also not possible to use a folder suitable for temporary storage. Therefore, the cache system will be disabled. It is recommended to address the aforementioned issues to utilize the cache system.') } else { cache_temporary_bool = true } @@ -239,19 +239,19 @@ pub fn initialize(dtm_init_params DynamicTemplateManagerInitialisationParams) &D // Control if 'templates' folder exist in the root project if !os.exists(dir_html_path) && !os.is_dir(dir_html_path) { system_ready = false - eprintln('${dtm.message_signature_error} The templates directory at the project root does not exist. Please create a "templates" directory at the root of your project with appropriate read permissions. This is a mandatory step for using the Dynamic Template Manager (DTM). Current path attempted for create the templates folder: "${dir_html_path}"') + eprintln('${message_signature_error} The templates directory at the project root does not exist. Please create a "templates" directory at the root of your project with appropriate read permissions. This is a mandatory step for using the Dynamic Template Manager (DTM). Current path attempted for create the templates folder: "${dir_html_path}"') } // Validates the 'max_size_data_in_mem' setting in 'dtm_init_params'. If it's within the valid range, it's applied; otherwise, default value is used. 
- if dtm_init_params.max_size_data_in_mem <= dtm.max_size_data_in_memory + if dtm_init_params.max_size_data_in_mem <= max_size_data_in_memory && dtm_init_params.max_size_data_in_mem >= 0 { max_size_memory = dtm_init_params.max_size_data_in_mem } else { - max_size_memory = dtm.max_size_data_in_memory + max_size_memory = max_size_data_in_memory mut type_error := 'exceeds' if dtm_init_params.max_size_data_in_mem < 0 { type_error = 'is invalid for define' } - eprintln('${dtm.message_signature_info} The value "${dtm_init_params.max_size_data_in_mem}KB" ${type_error} the memory storage limit. It will not be considered, and the limit will be set to ${dtm.max_size_data_in_memory}KB.') + eprintln('${message_signature_info} The value "${dtm_init_params.max_size_data_in_mem}KB" ${type_error} the memory storage limit. It will not be considered, and the limit will be set to ${max_size_data_in_memory}KB.') } mut dtmi := &DynamicTemplateManager{ @@ -270,9 +270,9 @@ pub fn initialize(dtm_init_params DynamicTemplateManagerInitialisationParams) &D dtmi.threads_handler << spawn dtmi.cache_handler() dtmi.threads_handler << spawn dtmi.handle_dtm_clock() } - println('${dtm.message_signature} Dynamic Template Manager activated') + println('${message_signature} Dynamic Template Manager activated') } else { - eprintln('${dtm.message_signature_error} Unable to use the Dynamic Template Manager, please refer to the above errors and correct them.') + eprintln('${message_signature_error} Unable to use the Dynamic Template Manager, please refer to the above errors and correct them.') } return dtmi @@ -301,7 +301,7 @@ pub fn (mut tm DynamicTemplateManager) expand(tmpl_path string, tmpl_var Templat if tm.dtm_init_is_ok { file_path, tmpl_name, current_content_checksum, tmpl_type := tm.check_tmpl_and_placeholders_size(tmpl_path, tmpl_var.placeholders) or { return err.msg() } - converted_cache_delay_expiration := i64(tmpl_var.cache_delay_expiration) * dtm.convert_seconds + 
converted_cache_delay_expiration := i64(tmpl_var.cache_delay_expiration) * convert_seconds // If cache exist, return necessary fields else, 'is_cache_exist' return false. is_cache_exist, id, path, mut last_template_mod, gen_at, cache_del_exp, content_checksum := tm.return_cache_info_isexistent(file_path) mut html := '' @@ -349,8 +349,8 @@ pub fn (mut tm DynamicTemplateManager) expand(tmpl_path string, tmpl_var Templat return html } else { tm.stop_cache_handler() - eprintln('${dtm.message_signature_error} The initialization phase of DTM has failed. Therefore, you cannot use it. Please address the errors and then restart the dtm server.') - return dtm.internat_server_error + eprintln('${message_signature_error} The initialization phase of DTM has failed. Therefore, you cannot use it. Please address the errors and then restart the dtm server.') + return internat_server_error } } @@ -391,16 +391,16 @@ fn check_and_clear_cache_files(c_folder string) ! { file_p := os.join_path(c_folder, 'test.tmp') // Create a text file for test permission access mut f := os.create(file_p) or { - return error('${dtm.message_signature_error} Files are not writable. Test fail, DTM initialization failed : ${err.msg()}') + return error('${message_signature_error} Files are not writable. Test fail, DTM initialization failed : ${err.msg()}') } f.close() // Read the previous text file for test permission access os.read_file(file_p) or { - return error('${dtm.message_signature_error} Files are not readable. Test fail, DTM initialization failed : ${err.msg()}') + return error('${message_signature_error} Files are not readable. 
Test fail, DTM initialization failed : ${err.msg()}') } // List all files in the cache folder files_list := os.ls(c_folder) or { - return error('${dtm.message_signature_error} While listing the cache directorie files, DTM initialization failed : ${err.msg()}') + return error('${message_signature_error} While listing the cache directorie files, DTM initialization failed : ${err.msg()}') } // Delete one by one "*.cache" or "*.tmp" files in the previous file list for file in files_list { @@ -408,7 +408,7 @@ fn check_and_clear_cache_files(c_folder string) ! { file_extension := os.file_ext(file_path).to_lower() if file_extension in ['.tmp', '.cache'] { os.rm(file_path) or { - eprintln('${dtm.message_signature_error} While deleting the cache file: ${file_path}. DTM initialization failed : ${err.msg()}') + eprintln('${message_signature_error} While deleting the cache file: ${file_path}. DTM initialization failed : ${err.msg()}') return } } @@ -453,8 +453,8 @@ fn (mut tm DynamicTemplateManager) check_tmpl_and_placeholders_size(f_path strin // Performs a basic check of the file extension. ext := os.file_ext(html_file) if ext != '.html' && ext != '.txt' { - eprintln('${dtm.message_signature_error} ${html_file}, is not a valid template file like .html or .txt') - return error(dtm.internat_server_error) + eprintln('${message_signature_error} ${html_file}, is not a valid template file like .html or .txt') + return error(internat_server_error) } if ext == '.txt' { define_file_type = TemplateType.text @@ -467,8 +467,8 @@ fn (mut tm DynamicTemplateManager) check_tmpl_and_placeholders_size(f_path strin } } } else { - eprintln("${dtm.message_signature_error} Template : '${html_file}' not found. Ensure all templates are located in the template directory.") - return error(dtm.internat_server_error) + eprintln("${message_signature_error} Template : '${html_file}' not found. 
Ensure all templates are located in the template directory.") + return error(internat_server_error) } } @@ -479,23 +479,23 @@ fn (mut tm DynamicTemplateManager) check_tmpl_and_placeholders_size(f_path strin mut combined_str := '' // Control placeholder key and value sizes for key, value in tmpl_var { - if key.len > dtm.max_placeholders_key_size { - eprintln('${dtm.message_signature_error} Length of placeholder key "${key}" exceeds the maximum allowed size for template content in file: ${html_file}. Max allowed size: ${dtm.max_placeholders_key_size} characters.') - return error(dtm.internat_server_error) + if key.len > max_placeholders_key_size { + eprintln('${message_signature_error} Length of placeholder key "${key}" exceeds the maximum allowed size for template content in file: ${html_file}. Max allowed size: ${max_placeholders_key_size} characters.') + return error(internat_server_error) } match value { string { - if value.len > dtm.max_placeholders_value_size { - eprintln('${dtm.message_signature_error} Length of placeholder value for key "${key}" exceeds the maximum allowed size for template content in file: ${html_file}. Max allowed size: ${dtm.max_placeholders_value_size} characters.') - return error(dtm.internat_server_error) + if value.len > max_placeholders_value_size { + eprintln('${message_signature_error} Length of placeholder value for key "${key}" exceeds the maximum allowed size for template content in file: ${html_file}. Max allowed size: ${max_placeholders_value_size} characters.') + return error(internat_server_error) } combined_str += value } else { casted_value := value.str() - if casted_value.len > dtm.max_placeholders_value_size { - eprintln('${dtm.message_signature_error} Length of placeholder value for key "${key}" exceeds the maximum allowed size for template content in file: ${html_file}. 
Max allowed size: ${dtm.max_placeholders_value_size} characters.') - return error(dtm.internat_server_error) + if casted_value.len > max_placeholders_value_size { + eprintln('${message_signature_error} Length of placeholder value for key "${key}" exceeds the maximum allowed size for template content in file: ${html_file}. Max allowed size: ${max_placeholders_value_size} characters.') + return error(internat_server_error) } combined_str += casted_value @@ -528,14 +528,14 @@ fn (mut tm DynamicTemplateManager) create_template_cache_and_display(tcs CacheRe // Control if cache delay expiration is correctly set. See the function itself for more details. check_if_cache_delay_iscorrect(cache_delay_expiration, tmpl_name) or { eprintln(err) - return dtm.internat_server_error + return internat_server_error } // Parses the template and stores the rendered output in the variable. See the function itself for more details. mut html := tm.parse_tmpl_file(file_path, tmpl_name, placeholders, tm.compress_html, tmpl_type) // If caching is enabled and the template content is valid, this section creates a temporary cache file, which is then used by the cache manager. 
// If successfully temporary is created, a cache creation/update notification is sent through its dedicated channel to the cache manager - if cache_delay_expiration != -1 && html != dtm.internat_server_error && tm.active_cache_server { + if cache_delay_expiration != -1 && html != internat_server_error && tm.active_cache_server { op_success, tmp_name := tm.create_temp_cache(html, file_path, unique_time) if op_success { tm.ch_cache_handler <- TemplateCache{ @@ -590,11 +590,11 @@ fn (tm DynamicTemplateManager) create_temp_cache(html &string, f_path string, ts // Converts the HTML content into a byte array html_bytes := html.bytes() mut f := os.create(cache_path) or { - eprintln('${dtm.message_signature_error} Cannot create temporary cache file : ${err.msg()}') + eprintln('${message_signature_error} Cannot create temporary cache file : ${err.msg()}') return false, '' } f.write(html_bytes) or { - eprintln('${dtm.message_signature_error} Cannot write in temporary cache file : ${err.msg()}') + eprintln('${message_signature_error} Cannot write in temporary cache file : ${err.msg()}') f.close() return false, '' } @@ -620,8 +620,8 @@ fn (mut tm DynamicTemplateManager) get_cache(name string, path string, placehold } .disk { r_b_html := os.read_bytes(value.cache_full_path_name) or { - eprintln('${dtm.message_signature_error} Get_cache() cannot read template cache file ${value.name} : ${err.msg()} ') - return dtm.internat_server_error + eprintln('${message_signature_error} Get_cache() cannot read template cache file ${value.name} : ${err.msg()} ') + return internat_server_error } html = r_b_html.bytestr() } @@ -751,7 +751,7 @@ fn (mut tm DynamicTemplateManager) cache_handler() { mut tc := <-tm.ch_cache_handler { // Close handler if asked. if tm.close_cache_handler { - eprintln('${dtm.message_signature_info} Cache manager has been successfully stopped. 
Please consider restarting the application if needed.') + eprintln('${message_signature_info} Cache manager has been successfully stopped. Please consider restarting the application if needed.') break } f_path_tmp := os.join_path(tm.template_cache_folder, tc.tmp_name_file) @@ -771,7 +771,7 @@ fn (mut tm DynamicTemplateManager) cache_handler() { tc.cache_storage_mode = .disk } file_data := os.read_bytes(f_path_tmp) or { - eprintln('${dtm.message_signature_error} Cache Handler : Failed to read tmp file, cache server will be stopped, you need to fix and restart application: ${err.msg()}') + eprintln('${message_signature_error} Cache Handler : Failed to read tmp file, cache server will be stopped, you need to fix and restart application: ${err.msg()}') break } @@ -783,7 +783,7 @@ fn (mut tm DynamicTemplateManager) cache_handler() { // If the cache is stored in memory, the temporary file is destroyed. tc.html_data = file_data os.rm(f_path_tmp) or { - eprintln('${dtm.message_signature_error} Cache Handler : While deleting the tmp cache file: "${f_path_tmp}", cache server will be stopped, you need to fix and restart application: ${err.msg()}') + eprintln('${message_signature_error} Cache Handler : While deleting the tmp cache file: "${f_path_tmp}", cache server will be stopped, you need to fix and restart application: ${err.msg()}') break } } @@ -792,7 +792,7 @@ fn (mut tm DynamicTemplateManager) cache_handler() { tc.cache_full_path_name = os.join_path(tm.template_cache_folder, '${tc.name}_${tc.checksum}.cache') os.mv(f_path_tmp, tc.cache_full_path_name) or { - eprintln('${dtm.message_signature_error} Cache Handler : Failed to rename tmp file, cache server will be stopped, you need to fix and restart application: ${err.msg()}') + eprintln('${message_signature_error} Cache Handler : Failed to rename tmp file, cache server will be stopped, you need to fix and restart application: ${err.msg()}') break } } @@ -838,7 +838,7 @@ fn (mut tm DynamicTemplateManager) cache_handler() 
{ } } else if tc.cache_request != .delete { os.rm(f_path_tmp) or { - eprintln('${dtm.message_signature_warn} Cache Handler : Cannot deleting the unused tmp cache file: "${f_path_tmp}" : ${err.msg()}') + eprintln('${message_signature_warn} Cache Handler : Cannot deleting the unused tmp cache file: "${f_path_tmp}" : ${err.msg()}') } } } @@ -911,7 +911,7 @@ fn (mut tm DynamicTemplateManager) chandler_clear_specific_cache(id int) (int, b .disk { file_path := os.join_path(tm.template_cache_folder, '${value.name}_${value.checksum}.cache') os.rm(file_path) or { - eprintln('${dtm.message_signature_error} While deleting the specific cache file: ${file_path}, cache server will be stopped, you need to fix and restart application: : ${err.msg()}') + eprintln('${message_signature_error} While deleting the specific cache file: ${file_path}, cache server will be stopped, you need to fix and restart application: : ${err.msg()}') break } } @@ -1002,12 +1002,12 @@ fn (mut tm DynamicTemplateManager) parse_tmpl_file(file_path string, tmpl_name s mut tmpl_ := compile_template_file(file_path, tmpl_name, placeholders) // Performs a light compression of the HTML output by removing usless spaces, newlines, and tabs if user selected this option. - if is_compressed && tmpl_type == TemplateType.html && tmpl_ != dtm.internat_server_error { + if is_compressed && tmpl_type == TemplateType.html && tmpl_ != internat_server_error { tmpl_ = tmpl_.replace_each(['\n', '', '\t', '', ' ', ' ']) mut r := regex.regex_opt(r'>(\s+)<') or { tm.stop_cache_handler() - eprintln('${dtm.message_signature_error} with regular expression for HTML light compression in parse_tmpl_file() function. Please check the syntax of the regex pattern : ${err.msg()}') - return dtm.internat_server_error + eprintln('${message_signature_error} with regular expression for HTML light compression in parse_tmpl_file() function. 
Please check the syntax of the regex pattern : ${err.msg()}') + return internat_server_error } tmpl_ = r.replace(tmpl_, '><') for tmpl_.contains(' ') { @@ -1027,9 +1027,9 @@ fn (mut tm DynamicTemplateManager) parse_tmpl_file(file_path string, tmpl_name s // - A parameter of -1 for no caching, meaning the template is processed every time without being stored in the cache." // fn check_if_cache_delay_iscorrect(cde i64, tmpl_name string) ! { - if (cde != 0 && cde != -1 && cde < dtm.converted_cache_delay_expiration_at_min) - || (cde != 0 && cde != -1 && cde > dtm.converted_cache_delay_expiration_at_max) { - return error("${dtm.message_signature_error} The cache timeout for template '${tmpl_name}' cannot be set to a value less than '${dtm.cache_delay_expiration_at_min}' seconds and more than '${dtm.cache_delay_expiration_at_max}' seconds. Exception for the value '0' which means no cache expiration, and the value '-1' which means html generation without caching.") + if (cde != 0 && cde != -1 && cde < converted_cache_delay_expiration_at_min) + || (cde != 0 && cde != -1 && cde > converted_cache_delay_expiration_at_max) { + return error("${message_signature_error} The cache timeout for template '${tmpl_name}' cannot be set to a value less than '${cache_delay_expiration_at_min}' seconds and more than '${cache_delay_expiration_at_max}' seconds. Exception for the value '0' which means no cache expiration, and the value '-1' which means html generation without caching.") } } @@ -1078,24 +1078,24 @@ fn (mut tm DynamicTemplateManager) handle_dtm_clock() { mut need_to_close := false defer { tm.ch_stop_dtm_clock.close() - eprintln('${dtm.message_signature_info} DTM clock handler has been successfully stopped.') + eprintln('${message_signature_info} DTM clock handler has been successfully stopped.') } for { // Calculate the remaining time until the next update. 
- current_time := get_current_unix_micro_timestamp() / dtm.convert_seconds - mut time_since_last_update := int(current_time - (tm.c_time / dtm.convert_seconds)) - mut minimum_wait_time_until_next_update := dtm.update_duration + current_time := get_current_unix_micro_timestamp() / convert_seconds + mut time_since_last_update := int(current_time - (tm.c_time / convert_seconds)) + mut minimum_wait_time_until_next_update := update_duration // Update DTM clock if update interval exceeded otherwise, set next check based on time since last update - if time_since_last_update >= dtm.update_duration { - tm.c_time = current_time * dtm.convert_seconds + if time_since_last_update >= update_duration { + tm.c_time = current_time * convert_seconds } else { if time_since_last_update < 0 { time_since_last_update = 0 } - minimum_wait_time_until_next_update = (dtm.update_duration - time_since_last_update) + - dtm.update_duration + minimum_wait_time_until_next_update = (update_duration - time_since_last_update) + + update_duration } // Wait until the next update interval or until a stop signal is received. @@ -1115,7 +1115,7 @@ fn (mut tm DynamicTemplateManager) handle_dtm_clock() { break } // Reset wait time for next cycle. - minimum_wait_time_until_next_update = dtm.update_duration + minimum_wait_time_until_next_update = update_duration } } diff --git a/vlib/x/templating/dtm/dynamic_template_manager_cache_system_test.v b/vlib/x/templating/dtm/dynamic_template_manager_cache_system_test.v index 1593905b237f5a..c17e1ceb9004da 100644 --- a/vlib/x/templating/dtm/dynamic_template_manager_cache_system_test.v +++ b/vlib/x/templating/dtm/dynamic_template_manager_cache_system_test.v @@ -11,16 +11,16 @@ const temp_html_n = 'temp' const vtmp_dir = os.vtmp_dir() fn testsuite_begin() { - temp_folder := os.join_path(dtm.vtmp_dir, dtm.temp_dtm_dir) + temp_folder := os.join_path(vtmp_dir, temp_dtm_dir) os.mkdir_all(temp_folder)! 
- vcache_path := os.join_path(temp_folder, dtm.temp_cache_dir) - templates_path := os.join_path(temp_folder, dtm.temp_templates_dir) + vcache_path := os.join_path(temp_folder, temp_cache_dir) + templates_path := os.join_path(temp_folder, temp_templates_dir) os.mkdir_all(vcache_path)! os.mkdir_all(templates_path)! - temp_html_file := os.join_path(templates_path, dtm.temp_html_fp) + temp_html_file := os.join_path(templates_path, temp_html_fp) html_content := ' @@ -63,8 +63,8 @@ fn test_get_cache() { } if !dtmi.abort_test { dtm_placeholders := map[string]DtmMultiTypeMap{} - temp_html_file := os.join_path(dtmi.template_folder, dtm.temp_html_fp) - html_mem := dtmi.get_cache(dtm.temp_html_n, temp_html_file, &dtm_placeholders) + temp_html_file := os.join_path(dtmi.template_folder, temp_html_fp) + html_mem := dtmi.get_cache(temp_html_n, temp_html_file, &dtm_placeholders) assert html_mem.len > 10 } } @@ -103,10 +103,10 @@ fn test_cache_handler() { } dtmi.create_cache() if !dtmi.abort_test { - path_f := os.join_path(dtmi.template_folder, dtm.temp_html_fp) + path_f := os.join_path(dtmi.template_folder, temp_html_fp) lock dtmi.template_caches { assert dtmi.template_caches[0].id == 1 - assert dtmi.template_caches[0].name == dtm.temp_html_n + assert dtmi.template_caches[0].name == temp_html_n assert dtmi.template_caches[0].path == path_f } dtmi.id_to_handlered = 1 @@ -124,16 +124,16 @@ fn test_cache_handler() { } fn testsuite_end() { - temp_folder := os.join_path(dtm.vtmp_dir, dtm.temp_dtm_dir) + temp_folder := os.join_path(vtmp_dir, temp_dtm_dir) os.rmdir_all(temp_folder) or {} } // Utilities function : fn init_dtm(b bool, m int) &DynamicTemplateManager { - temp_folder := os.join_path(dtm.vtmp_dir, dtm.temp_dtm_dir) - vcache_path := os.join_path(temp_folder, dtm.temp_cache_dir) - templates_path := os.join_path(temp_folder, dtm.temp_templates_dir) + temp_folder := os.join_path(vtmp_dir, temp_dtm_dir) + vcache_path := os.join_path(temp_folder, temp_cache_dir) + templates_path := 
os.join_path(temp_folder, temp_templates_dir) init_params := DynamicTemplateManagerInitialisationParams{ active_cache_server: b @@ -148,14 +148,14 @@ fn init_dtm(b bool, m int) &DynamicTemplateManager { } fn (mut tm DynamicTemplateManager) create_cache() string { - temp_html_file := os.join_path(tm.template_folder, dtm.temp_html_fp) + temp_html_file := os.join_path(tm.template_folder, temp_html_fp) html_last_mod := os.file_last_mod_unix(temp_html_file) c_time := get_current_unix_micro_timestamp() cache_delay_exp := i64(500) * i64(1000000) placeholder := map[string]DtmMultiTypeMap{} content_checksum := '' html := tm.create_template_cache_and_display(.new, html_last_mod, c_time, temp_html_file, - dtm.temp_html_n, cache_delay_exp, &placeholder, content_checksum, TemplateType.html) + temp_html_n, cache_delay_exp, &placeholder, content_checksum, TemplateType.html) tm.sync_cache() return html diff --git a/vlib/x/templating/dtm/dynamic_template_manager_test.v b/vlib/x/templating/dtm/dynamic_template_manager_test.v index 565179d29fbcfe..a9795bb21b3154 100644 --- a/vlib/x/templating/dtm/dynamic_template_manager_test.v +++ b/vlib/x/templating/dtm/dynamic_template_manager_test.v @@ -9,14 +9,14 @@ const temp_html_n = 'temp' const vtmp_dir = os.vtmp_dir() fn testsuite_begin() { - temp_folder := os.join_path(dtm.vtmp_dir, dtm.temp_dtm_dir) + temp_folder := os.join_path(vtmp_dir, temp_dtm_dir) os.mkdir_all(temp_folder)! - templates_path := os.join_path(temp_folder, dtm.temp_templates_dir) + templates_path := os.join_path(temp_folder, temp_templates_dir) os.mkdir_all(templates_path)! 
- temp_html_file := os.join_path(templates_path, dtm.temp_html_fp) + temp_html_file := os.join_path(templates_path, temp_html_fp) html_content := ' @@ -41,21 +41,21 @@ fn test_initialize_dtm() { fn test_create_template_cache_and_display() { mut dtmi := init_dtm(false, 0) - temp_html_file := os.join_path(dtmi.template_folder, dtm.temp_html_fp) + temp_html_file := os.join_path(dtmi.template_folder, temp_html_fp) html_last_mod := os.file_last_mod_unix(temp_html_file) c_time := get_current_unix_micro_timestamp() cache_delay_exp := i64(500) * i64(1000000) placeholder := map[string]DtmMultiTypeMap{} content_checksum := '' html := dtmi.create_template_cache_and_display(.new, html_last_mod, c_time, temp_html_file, - dtm.temp_html_n, cache_delay_exp, &placeholder, content_checksum, TemplateType.html) + temp_html_n, cache_delay_exp, &placeholder, content_checksum, TemplateType.html) assert html.len > 10 } fn test_return_cache_info_isexistent() { mut dtmi := init_dtm(false, 0) - path_template := os.join_path(dtmi.template_folder, dtm.temp_html_fp) + path_template := os.join_path(dtmi.template_folder, temp_html_fp) lock dtmi.template_caches { dtmi.template_caches << TemplateCache{ id: 1 @@ -137,7 +137,7 @@ fn test_remaining_template_request() { fn test_check_tmpl_and_placeholders_size() { mut dtmi := init_dtm(false, 0) - temp_html_file := os.join_path(dtmi.template_folder, dtm.temp_html_fp) + temp_html_file := os.join_path(dtmi.template_folder, temp_html_fp) placeholders := map[string]DtmMultiTypeMap{} path, filename, content_checksum, tmpl_type := dtmi.check_tmpl_and_placeholders_size(temp_html_file, @@ -156,7 +156,7 @@ fn test_check_tmpl_and_placeholders_size() { fn test_chandler_prevent_cache_duplicate_request() { dtmi := init_dtm(false, 0) - temp_html_file := os.join_path(dtmi.template_folder, dtm.temp_html_fp) + temp_html_file := os.join_path(dtmi.template_folder, temp_html_fp) lock dtmi.template_caches { dtmi.template_caches << TemplateCache{ @@ -253,23 +253,23 @@ fn 
test_chandler_remaining_cache_template_used() { fn test_parse_tmpl_file() { mut dtmi := init_dtm(false, 0) - temp_folder := os.join_path(dtm.vtmp_dir, dtm.temp_dtm_dir) - templates_path := os.join_path(temp_folder, dtm.temp_templates_dir) - temp_html_file := os.join_path(templates_path, dtm.temp_html_fp) + temp_folder := os.join_path(vtmp_dir, temp_dtm_dir) + templates_path := os.join_path(temp_folder, temp_templates_dir) + temp_html_file := os.join_path(templates_path, temp_html_fp) mut placeholders := map[string]DtmMultiTypeMap{} is_compressed := true - html := dtmi.parse_tmpl_file(temp_html_file, dtm.temp_html_n, &placeholders, is_compressed, + html := dtmi.parse_tmpl_file(temp_html_file, temp_html_n, &placeholders, is_compressed, TemplateType.html) assert html.len > 0 } fn test_check_if_cache_delay_iscorrect() { - check_if_cache_delay_iscorrect(i64(300 * 1000000), dtm.temp_html_n) or { assert false } + check_if_cache_delay_iscorrect(i64(300 * 1000000), temp_html_n) or { assert false } - check_if_cache_delay_iscorrect(i64(-100), dtm.temp_html_n) or { assert true } + check_if_cache_delay_iscorrect(i64(-100), temp_html_n) or { assert true } } fn test_cache_request_route() { @@ -315,15 +315,15 @@ fn test_cache_request_route() { } fn testsuite_end() { - temp_folder := os.join_path(dtm.vtmp_dir, dtm.temp_dtm_dir) + temp_folder := os.join_path(vtmp_dir, temp_dtm_dir) os.rmdir_all(temp_folder) or {} } // Utilities function : fn init_dtm(b bool, m int) &DynamicTemplateManager { - temp_folder := os.join_path(dtm.vtmp_dir, dtm.temp_dtm_dir) - templates_path := os.join_path(temp_folder, dtm.temp_templates_dir) + temp_folder := os.join_path(vtmp_dir, temp_dtm_dir) + templates_path := os.join_path(temp_folder, temp_templates_dir) init_params := DynamicTemplateManagerInitialisationParams{ active_cache_server: b diff --git a/vlib/x/templating/dtm/tmpl.v b/vlib/x/templating/dtm/tmpl.v index f57915f54e32c0..a535f80011bd32 100644 --- a/vlib/x/templating/dtm/tmpl.v +++ 
b/vlib/x/templating/dtm/tmpl.v @@ -134,7 +134,7 @@ fn insert_template_code(fn_name string, tmpl_str_start string, line string, data state State) string { // HTML, may include `@var` // escaped by cgen, unless it's a `vweb.RawHtml` string - trailing_bs := dtm.tmpl_str_end + 'sb_${fn_name}.write_u8(92)\n' + tmpl_str_start + trailing_bs := tmpl_str_end + 'sb_${fn_name}.write_u8(92)\n' + tmpl_str_start round1 := ['\\', '\\\\', r"'", "\\'", r'@', r'$'] round2 := [r'$$', r'\@', r'.$', r'.@'] mut rline := line.replace_each(round1).replace_each(round2) diff --git a/vlib/x/ttf/common.v b/vlib/x/ttf/common.v index 038d25598cec67..3c5665c1b0e0f4 100644 --- a/vlib/x/ttf/common.v +++ b/vlib/x/ttf/common.v @@ -39,7 +39,7 @@ pub enum Style { const debug_flag = false fn dprintln(txt string) { - if ttf.debug_flag { + if debug_flag { println(txt) } } diff --git a/vlib/x/ttf/ttf.v b/vlib/x/ttf/ttf.v index 49a9d1426c5e22..e84dd5b086396c 100644 --- a/vlib/x/ttf/ttf.v +++ b/vlib/x/ttf/ttf.v @@ -136,7 +136,7 @@ const g_type_complex = u16(2) pub struct Glyph { pub mut: - g_type u16 = ttf.g_type_simple + g_type u16 = g_type_simple contour_ends []u16 number_of_contours i16 points []Point @@ -382,9 +382,9 @@ fn (mut tf TTF_File) read_simple_glyph(mut in_glyph Glyph) { in_glyph.points << Point{ x: 0 y: 0 - on_curve: (flag & ttf.tfk_on_curve) > 0 + on_curve: (flag & tfk_on_curve) > 0 } - if (flag & ttf.tfk_repeat) > 0 { + if (flag & tfk_repeat) > 0 { mut repeat_count := tf.get_u8() assert repeat_count > 0 i += repeat_count @@ -393,7 +393,7 @@ fn (mut tf TTF_File) read_simple_glyph(mut in_glyph Glyph) { in_glyph.points << Point{ x: 0 y: 0 - on_curve: (flag & ttf.tfk_on_curve) > 0 + on_curve: (flag & tfk_on_curve) > 0 } repeat_count-- } @@ -405,13 +405,13 @@ fn (mut tf TTF_File) read_simple_glyph(mut in_glyph Glyph) { mut value := 0 for i_x in 0 .. 
num_points { flag_x := flags[i_x] - if (flag_x & ttf.tfk_x_is_byte) > 0 { - if (flag_x & ttf.tfk_x_delta) > 0 { + if (flag_x & tfk_x_is_byte) > 0 { + if (flag_x & tfk_x_delta) > 0 { value += tf.get_u8() } else { value -= tf.get_u8() } - } else if (~flag_x & ttf.tfk_x_delta) > 0 { + } else if (~flag_x & tfk_x_delta) > 0 { value += tf.get_i16() } else { // value is unchanged @@ -424,13 +424,13 @@ fn (mut tf TTF_File) read_simple_glyph(mut in_glyph Glyph) { value = 0 for i_y in 0 .. num_points { flag_y := flags[i_y] - if (flag_y & ttf.tfk_y_is_byte) > 0 { - if (flag_y & ttf.tfk_y_delta) > 0 { + if (flag_y & tfk_y_is_byte) > 0 { + if (flag_y & tfk_y_delta) > 0 { value += tf.get_u8() } else { value -= tf.get_u8() } - } else if (~flag_y & ttf.tfk_y_delta) > 0 { + } else if (~flag_y & tfk_y_delta) > 0 { value += tf.get_i16() } else { // value is unchanged @@ -464,10 +464,10 @@ mut: } fn (mut tf TTF_File) read_compound_glyph(mut in_glyph Glyph) { - in_glyph.g_type = ttf.g_type_complex + in_glyph.g_type = g_type_complex mut component := Component{} - mut flags := ttf.tfkc_more_components - for (flags & ttf.tfkc_more_components) > 0 { + mut flags := tfkc_more_components + for (flags & tfkc_more_components) > 0 { mut arg1 := i16(0) mut arg2 := i16(0) @@ -475,7 +475,7 @@ fn (mut tf TTF_File) read_compound_glyph(mut in_glyph Glyph) { component.glyph_index = tf.get_u16() - if (flags & ttf.tfkc_arg_1_and_2_are_words) > 0 { + if (flags & tfkc_arg_1_and_2_are_words) > 0 { arg1 = tf.get_i16() arg2 = tf.get_i16() } else { @@ -483,7 +483,7 @@ fn (mut tf TTF_File) read_compound_glyph(mut in_glyph Glyph) { arg2 = tf.get_u8() } - if (flags & ttf.tfkc_args_are_xy_values) > 0 { + if (flags & tfkc_args_are_xy_values) > 0 { component.matrix[4] = arg1 component.matrix[5] = arg2 } else { @@ -491,13 +491,13 @@ fn (mut tf TTF_File) read_compound_glyph(mut in_glyph Glyph) { component.src_point_index = arg2 } - if (flags & ttf.tfkc_we_have_a_scale) > 0 { + if (flags & tfkc_we_have_a_scale) > 0 { 
component.matrix[0] = tf.get_2dot14() component.matrix[3] = component.matrix[0] - } else if (flags & ttf.tfkc_we_have_an_x_and_y_scale) > 0 { + } else if (flags & tfkc_we_have_an_x_and_y_scale) > 0 { component.matrix[0] = tf.get_2dot14() component.matrix[3] = tf.get_2dot14() - } else if (flags & ttf.tfkc_we_have_a_two_by_two) > 0 { + } else if (flags & tfkc_we_have_a_two_by_two) > 0 { component.matrix[0] = tf.get_2dot14() component.matrix[1] = tf.get_2dot14() component.matrix[2] = tf.get_2dot14() @@ -532,7 +532,7 @@ fn (mut tf TTF_File) read_compound_glyph(mut in_glyph Glyph) { in_glyph.number_of_contours = i16(in_glyph.contour_ends.len) - if (flags & ttf.tfkc_we_have_instructions) > 0 { + if (flags & tfkc_we_have_instructions) > 0 { tf.pos = tf.get_u16() + tf.pos } // ok we have a valid glyph diff --git a/vlib/x/vweb/middleware.v b/vlib/x/vweb/middleware.v index 0358b208d891dd..ab416e160b2da2 100644 --- a/vlib/x/vweb/middleware.v +++ b/vlib/x/vweb/middleware.v @@ -225,7 +225,7 @@ pub fn (options &CorsOptions) set_headers(mut ctx Context) { } else if _ := ctx.req.header.get(.access_control_request_headers) { // a server must respond with `Access-Control-Allow-Headers` if // `Access-Control-Request-Headers` is present in a preflight request - ctx.set_header(.access_control_allow_headers, vweb.cors_safelisted_response_headers.join(',')) + ctx.set_header(.access_control_allow_headers, cors_safelisted_response_headers.join(',')) } if options.allowed_methods.len > 0 { diff --git a/vlib/x/vweb/parse.v b/vlib/x/vweb/parse.v index 5c7d8410557575..2220efd2356526 100644 --- a/vlib/x/vweb/parse.v +++ b/vlib/x/vweb/parse.v @@ -77,11 +77,11 @@ fn parse_form_from_request(request http.Request) !(map[string]string, map[string } ct := request.header.get(.content_type) or { '' }.split(';').map(it.trim_left(' \t')) if 'multipart/form-data' in ct { - boundaries := ct.filter(it.starts_with(vweb.boundary_start)) + boundaries := ct.filter(it.starts_with(boundary_start)) if boundaries.len 
!= 1 { return error('detected more that one form-data boundary') } - boundary := boundaries[0].all_after(vweb.boundary_start) + boundary := boundaries[0].all_after(boundary_start) if boundary.len > 0 && boundary[0] == `"` { // quotes are send by our http.post_multipart_form/2: return http.parse_multipart_form(request.data, boundary.trim('"')) diff --git a/vlib/x/vweb/vweb.v b/vlib/x/vweb/vweb.v index 787623970e8406..2044652112ae7b 100644 --- a/vlib/x/vweb/vweb.v +++ b/vlib/x/vweb/vweb.v @@ -313,7 +313,7 @@ pub fn run_at[A, X](mut global_app A, params RunParams) ! { pico_context.idx = []int{len: picoev.max_fds} // reserve space for read and write buffers - pico_context.buf = unsafe { malloc_noscan(picoev.max_fds * vweb.max_read + 1) } + pico_context.buf = unsafe { malloc_noscan(picoev.max_fds * max_read + 1) } defer { unsafe { free(pico_context.buf) } } @@ -384,7 +384,7 @@ fn handle_timeout(mut pv picoev.Picoev, mut params RequestParams, fd int) { is_blocking: false } - fast_send_resp(mut conn, vweb.http_408) or {} + fast_send_resp(mut conn, http_408) or {} pv.close_conn(fd) params.request_done(fd) @@ -399,8 +399,8 @@ fn handle_write_file(mut pv picoev.Picoev, mut params RequestParams, fd int) { bytes_written := sendfile(fd, params.file_responses[fd].file.fd, bytes_to_write) params.file_responses[fd].pos += bytes_written } $else { - if bytes_to_write > vweb.max_write { - bytes_to_write = vweb.max_write + if bytes_to_write > max_write { + bytes_to_write = max_write } data := unsafe { malloc(bytes_to_write) } @@ -440,8 +440,8 @@ fn handle_write_file(mut pv picoev.Picoev, mut params RequestParams, fd int) { fn handle_write_string(mut pv picoev.Picoev, mut params RequestParams, fd int) { mut bytes_to_write := int(params.string_responses[fd].str.len - params.string_responses[fd].pos) - if bytes_to_write > vweb.max_write { - bytes_to_write = vweb.max_write + if bytes_to_write > max_write { + bytes_to_write = max_write } mut conn := &net.TcpConn{ @@ -481,7 +481,7 @@ fn 
handle_read[A, X](mut pv picoev.Picoev, mut params RequestParams, fd int) { } // cap the max_read to 8KB - mut reader := io.new_buffered_reader(reader: conn, cap: vweb.max_read) + mut reader := io.new_buffered_reader(reader: conn, cap: max_read) defer { unsafe { reader.free() @@ -510,11 +510,11 @@ fn handle_read[A, X](mut pv picoev.Picoev, mut params RequestParams, fd int) { params.incomplete_requests[fd] = http.Request{} return } - if reader.total_read >= vweb.max_read { + if reader.total_read >= max_read { // throw an error when the request header is larger than 8KB // same limit that apache handles eprintln('[vweb] error parsing request: too large') - fast_send_resp(mut conn, vweb.http_413) or {} + fast_send_resp(mut conn, http_413) or {} pv.close_conn(fd) params.incomplete_requests[fd] = http.Request{} @@ -525,16 +525,16 @@ fn handle_read[A, X](mut pv picoev.Picoev, mut params RequestParams, fd int) { // check if the request has a body content_length := req.header.get(.content_length) or { '0' } if content_length.int() > 0 { - mut max_bytes_to_read := vweb.max_read - reader.total_read + mut max_bytes_to_read := max_read - reader.total_read mut bytes_to_read := content_length.int() - params.idx[fd] // cap the bytes to read to 8KB for the body, including the request headers if any - if bytes_to_read > vweb.max_read - reader.total_read { - bytes_to_read = vweb.max_read - reader.total_read + if bytes_to_read > max_read - reader.total_read { + bytes_to_read = max_read - reader.total_read } mut buf_ptr := params.buf unsafe { - buf_ptr += fd * vweb.max_read // pointer magic + buf_ptr += fd * max_read // pointer magic } // convert to []u8 for BufferedReader mut buf := unsafe { buf_ptr.vbytes(max_bytes_to_read) } @@ -557,7 +557,7 @@ fn handle_read[A, X](mut pv picoev.Picoev, mut params RequestParams, fd int) { header: http.new_header( key: .content_type value: 'text/plain' - ).join(vweb.headers_close) + ).join(headers_close) )) or {} pv.close_conn(fd) @@ -602,7 +602,7 
@@ fn handle_read[A, X](mut pv picoev.Picoev, mut params RequestParams, fd int) { // small optimization: if the response is small write it immediately // the socket is most likely able to write all the data without blocking. // See Context.send_file for why we use max_read instead of max_write. - if completed_context.res.body.len < vweb.max_read { + if completed_context.res.body.len < max_read { fast_send_resp(mut conn, completed_context.res) or {} handle_complete_request(completed_context.client_wants_to_close, mut pv, fd) @@ -615,7 +615,7 @@ fn handle_read[A, X](mut pv picoev.Picoev, mut params RequestParams, fd int) { if res == -1 { // should not happen params.string_responses[fd].done() - fast_send_resp(mut conn, vweb.http_500) or {} + fast_send_resp(mut conn, http_500) or {} handle_complete_request(completed_context.client_wants_to_close, mut pv, fd) return @@ -627,13 +627,13 @@ fn handle_read[A, X](mut pv picoev.Picoev, mut params RequestParams, fd int) { .file { // save file information length := completed_context.res.header.get(.content_length) or { - fast_send_resp(mut conn, vweb.http_500) or {} + fast_send_resp(mut conn, http_500) or {} return } params.file_responses[fd].total = length.i64() params.file_responses[fd].file = os.open(completed_context.return_file) or { // Context checks if the file is valid, so this should never happen - fast_send_resp(mut conn, vweb.http_500) or {} + fast_send_resp(mut conn, http_500) or {} params.file_responses[fd].done() pv.close_conn(fd) return @@ -644,7 +644,7 @@ fn handle_read[A, X](mut pv picoev.Picoev, mut params RequestParams, fd int) { // picoev error if res == -1 { // should not happen - fast_send_resp(mut conn, vweb.http_500) or {} + fast_send_resp(mut conn, http_500) or {} params.file_responses[fd].done() pv.close_conn(fd) return @@ -690,7 +690,7 @@ fn handle_request[A, X](mut conn net.TcpConn, req http.Request, params &RequestP form, files := parse_form_from_request(req) or { // Bad request eprintln('[vweb] 
error parsing form: ${err.msg()}') - conn.write(vweb.http_400.bytes()) or {} + conn.write(http_400.bytes()) or {} return none } @@ -982,7 +982,7 @@ fn serve_if_static[A, X](app &A, mut user_context X, url urllib.URL, host string // StaticHandler ensures that the mime type exists on either the App or in vweb ext := os.file_ext(static_file) - mut mime_type := app.static_mime_types[ext] or { vweb.mime_types[ext] } + mut mime_type := app.static_mime_types[ext] or { mime_types[ext] } static_host := app.static_hosts[asked_path] or { '' } if static_file == '' || mime_type == '' { diff --git a/vlib/x/vweb/vweb_livereload.v b/vlib/x/vweb/vweb_livereload.v index 5e0fcdca2d6531..9800ca34f479f0 100644 --- a/vlib/x/vweb/vweb_livereload.v +++ b/vlib/x/vweb/vweb_livereload.v @@ -21,7 +21,7 @@ const vweb_livereload_server_start = time.ticks().str() // timestamp/ticks corresponding to when the vweb server process was started @[if vweb_livereload ?] fn (mut ctx Context) handle_vweb_livereload_current() { - ctx.send_response_to_client('text/plain', vweb.vweb_livereload_server_start) + ctx.send_response_to_client('text/plain', vweb_livereload_server_start) } // handle_vweb_livereload_script serves a small dynamically generated .js file, @@ -42,7 +42,7 @@ function vweb_livereload_checker_fn(started_at) { } }); } -const vweb_livereload_checker = setInterval(vweb_livereload_checker_fn, ${ctx.livereload_poll_interval_ms}, "${vweb.vweb_livereload_server_start}"); +const vweb_livereload_checker = setInterval(vweb_livereload_checker_fn, ${ctx.livereload_poll_interval_ms}, "${vweb_livereload_server_start}"); ' ctx.send_response_to_client('text/javascript', res) }