From f709b7d64fbe56ef461df0e691255419d62d8839 Mon Sep 17 00:00:00 2001 From: ulises-jeremias Date: Sun, 5 Nov 2023 01:26:07 -0300 Subject: [PATCH] Updated plotly method and examples --- examples/deriv_example/main.v | 30 +- examples/diff_example/main.v | 30 +- examples/dist_histogram/main.v | 72 ++--- examples/io_h5_dataset/main.v | 18 +- examples/io_h5_relax/main.v | 52 ++-- examples/iter_lazy_generation/main.v | 18 +- examples/la_triplet01/main.v | 32 ++- examples/ml_kmeans/main.v | 82 +++--- examples/ml_kmeans_plot/main.v | 46 +-- examples/ml_linreg01/main.v | 56 ++-- examples/ml_linreg02/main.v | 58 ++-- examples/ml_sentiment_analysis/main.v | 364 ++++++++++++------------ examples/mpi_basic_example/main.v | 82 +++--- examples/mpi_bcast_example/main.v | 82 +++--- examples/plot_basic_heatmap/main.v | 32 ++- examples/plot_histogram/main.v | 36 +-- examples/plot_scatter/main.v | 68 ++--- examples/plot_scatter_colorscale/main.v | 62 ++-- examples/prime_factorization/main.v | 1 - examples/roots_bisection_solver/main.v | 86 +++--- examples/vcl_opencl_basic/main.v | 80 +++--- plot/show.v | 11 +- 22 files changed, 725 insertions(+), 673 deletions(-) diff --git a/examples/deriv_example/main.v b/examples/deriv_example/main.v index 785992cb3..bf6e79c46 100644 --- a/examples/deriv_example/main.v +++ b/examples/deriv_example/main.v @@ -9,21 +9,23 @@ fn pow(x f64, _ []f64) f64 { return math.pow(x, 1.5) } -f := func.Fn.new(f: pow) -println('f(x) = x^(3/2)') +fn main() { + f := func.Fn.new(f: pow) + println('f(x) = x^(3/2)') -mut expected := 1.5 * math.sqrt(2.0) -mut result, mut abserr := deriv.central(f, 2.0, 1e-8) -println('x = 2.0') -println("f'(x) = ${result} +/- ${abserr}") -println('exact = ${expected}') + mut expected := 1.5 * math.sqrt(2.0) + mut result, mut abserr := deriv.central(f, 2.0, 1e-8) + println('x = 2.0') + println("f'(x) = ${result} +/- ${abserr}") + println('exact = ${expected}') -assert float64.soclose(result, expected, abserr) + assert float64.soclose(result, expected, abserr) -expected = 0.0 -result, abserr = deriv.forward(f, 0.0, 1e-8) -println('x = 0.0') -println("f'(x) = ${result} +/- ${abserr}") -println('exact = ${expected}') + expected = 0.0 + result, abserr = deriv.forward(f, 0.0, 1e-8) + println('x = 0.0') + println("f'(x) = ${result} +/- ${abserr}") + println('exact = ${expected}') -assert float64.soclose(result, expected, abserr) + assert float64.soclose(result, expected, abserr) +} diff --git a/examples/diff_example/main.v b/examples/diff_example/main.v index 02b1a8743..5a80e423a 100644 --- a/examples/diff_example/main.v +++ b/examples/diff_example/main.v @@ -9,21 +9,23 @@ fn pow(x f64, _ []f64) f64 { return math.pow(x, 1.5) } -f := func.Fn.new(f: pow) -println('f(x) = x^(3/2)') +fn main() { + f := func.Fn.new(f: pow) + println('f(x) = x^(3/2)') -mut expected := 1.5 * math.sqrt(2.0) -mut result, mut abserr := diff.central(f, 2.0) -println('x = 2.0') -println("f'(x) = ${result} +/- ${abserr}") -println('exact = ${expected}') + mut expected := 1.5 * math.sqrt(2.0) + mut result, mut abserr := diff.central(f, 2.0) + println('x = 2.0') + println("f'(x) = ${result} +/- ${abserr}") + println('exact = ${expected}') -assert float64.soclose(result, expected, abserr) + assert float64.soclose(result, expected, abserr) -expected = 0.0 -result, abserr = diff.forward(f, 0.0) -println('x = 0.0') -println("f'(x) = ${result} +/- ${abserr}") -println('exact = ${expected}') + expected = 0.0 + result, abserr = diff.forward(f, 0.0) + println('x = 0.0') + println("f'(x) = ${result} +/- 
${abserr}") + println('exact = ${expected}') -assert float64.soclose(result, expected, abserr) + assert float64.soclose(result, expected, abserr) +} diff --git a/examples/dist_histogram/main.v b/examples/dist_histogram/main.v index 60d9f12b4..12d650477 100644 --- a/examples/dist_histogram/main.v +++ b/examples/dist_histogram/main.v @@ -2,43 +2,45 @@ module main import vsl.dist -lims := [0.0, 1, 2, 3, 4, 5] +fn main() { + lims := [0.0, 1, 2, 3, 4, 5] -mut hist := dist.Histogram.new(lims) + mut hist := dist.Histogram.new(lims) -values := [ - 0.0, - 0.1, - 0.2, - 0.3, - 0.9, - 1, - 1, - 1, - 1.2, - 1.3, - 1.4, - 1.5, - 1.99, - 2, - 2.5, - 3, - 3.5, - 4.1, - 4.5, - 4.9, - -3, - -2, - -1, - 5, - 6, - 7, - 8, -] + values := [ + 0.0, + 0.1, + 0.2, + 0.3, + 0.9, + 1, + 1, + 1, + 1.2, + 1.3, + 1.4, + 1.5, + 1.99, + 2, + 2.5, + 3, + 3.5, + 4.1, + 4.5, + 4.9, + -3, + -2, + -1, + 5, + 6, + 7, + 8, + ] -hist.count(values, true)! -labels := hist.gen_labels('%g')! + hist.count(values, true)! + labels := hist.gen_labels('%g')! -hist_text := dist.text_hist(labels, hist.counts, 0)! + hist_text := dist.text_hist(labels, hist.counts, 0)! -println(hist_text) + println(hist_text) +} diff --git a/examples/io_h5_dataset/main.v b/examples/io_h5_dataset/main.v index 8628597ab..a1226db3d 100644 --- a/examples/io_h5_dataset/main.v +++ b/examples/io_h5_dataset/main.v @@ -2,13 +2,15 @@ import vsl.inout.h5 import math.stats import rand -linedata := []f64{len: 21, init: (0 * index) + rand.f64()} -mut meanv := 0.0 -hdffile := 'hdffile.h5' +fn main() { + linedata := []f64{len: 21, init: (0 * index) + rand.f64()} + mut meanv := 0.0 + hdffile := 'hdffile.h5' -meanv = stats.mean(linedata) + meanv = stats.mean(linedata) -f := h5.Hdf5File.new(hdffile)! -f.write_dataset1d('/randdata', linedata)! -f.write_attribute('/randdata', 'mean', meanv)! -f.close() + f := h5.Hdf5File.new(hdffile)! + f.write_dataset1d('/randdata', linedata)! + f.write_attribute('/randdata', 'mean', meanv)! + f.close() +} diff --git a/examples/io_h5_relax/main.v b/examples/io_h5_relax/main.v index 7043fdcb4..474039b88 100644 --- a/examples/io_h5_relax/main.v +++ b/examples/io_h5_relax/main.v @@ -1,32 +1,34 @@ import vsl.inout.h5 import math -// A simple 1d relaxation problem. Write the results -// and two attributes of the calculation to an HDF5 file. -// To see the results, use `h5dump ex1_hdffile.h5` -mut linedata := []f64{len: 21} -mut newv := 0.0 -hdffile := 'ex1_hdffile.h5' -mut rounds := i32(0) +fn main() { + // A simple 1d relaxation problem. Write the results + // and two attributes of the calculation to an HDF5 file. + // To see the results, use `h5dump ex1_hdffile.h5` + mut linedata := []f64{len: 21} + mut newv := 0.0 + hdffile := 'ex1_hdffile.h5' + mut rounds := i32(0) -linedata[0] = -2.0 -linedata[20] = 3.0 -mut maxdiff := 0.0 + linedata[0] = -2.0 + linedata[20] = 3.0 + mut maxdiff := 0.0 -for loop in 0 .. 1000 { - maxdiff = -math.max_f64 - for i in 1 .. linedata.len - 1 { - newv = (linedata[i - 1] + linedata[i] + linedata[i + 1]) / 3.00 - maxdiff = math.max(maxdiff, math.abs(newv - linedata[i])) - linedata[i] = newv - } - rounds = loop - if maxdiff < 0.0001 { - break + for loop in 0 .. 1000 { + maxdiff = -math.max_f64 + for i in 1 .. linedata.len - 1 { + newv = (linedata[i - 1] + linedata[i] + linedata[i + 1]) / 3.00 + maxdiff = math.max(maxdiff, math.abs(newv - linedata[i])) + linedata[i] = newv + } + rounds = loop + if maxdiff < 0.0001 { + break + } } + f := h5.Hdf5File.new(hdffile)! + f.write_dataset1d('linedata', linedata)! 
+ f.write_attribute('linedata', 'rounds', rounds)! + f.write_attribute('linedata', 'maxdiff', maxdiff)! + f.close() } -f := h5.Hdf5File.new(hdffile)! -f.write_dataset1d('linedata', linedata)! -f.write_attribute('linedata', 'rounds', rounds)! -f.write_attribute('linedata', 'maxdiff', maxdiff)! -f.close() diff --git a/examples/iter_lazy_generation/main.v b/examples/iter_lazy_generation/main.v index 9a5789d69..e0e48a856 100644 --- a/examples/iter_lazy_generation/main.v +++ b/examples/iter_lazy_generation/main.v @@ -2,9 +2,17 @@ module main import vsl.iter -data := [1.0, 2.0, 3.0] -r := 3 -mut combs := iter.CombinationsIter.new(data, r) -for comb in combs { - print(comb) +fn main() { + data := [1.0, 2.0, 3.0] + r := 3 + mut count := 0 + + mut combs := iter.CombinationsIter.new(data, r) + + for comb in combs { + print(comb) + count += 1 + } + + assert count == 3 } diff --git a/examples/la_triplet01/main.v b/examples/la_triplet01/main.v index 280e6c18e..628e4c03c 100644 --- a/examples/la_triplet01/main.v +++ b/examples/la_triplet01/main.v @@ -2,22 +2,24 @@ module main import vsl.la -mut a := la.Triplet.new[f64](4, 4, 6) +fn main() { + mut a := la.Triplet.new[f64](4, 4, 6) -a.put(1, 0, 1.0)! -a.put(0, 1, 2.0)! -a.put(3, 1, 3.0)! -a.put(1, 2, 4.0)! -a.put(2, 3, 5.0)! -a.put(3, 3, 6.0)! + a.put(1, 0, 1.0)! + a.put(0, 1, 2.0)! + a.put(3, 1, 3.0)! + a.put(1, 2, 4.0)! + a.put(2, 3, 5.0)! + a.put(3, 3, 6.0)! -mut expected_matrix := la.Matrix.deep2([ - [0.0, 2, 0, 0], - [1.0, 0, 4, 0], - [0.0, 0, 0, 5], - [0.0, 3, 0, 6], -]) + mut expected_matrix := la.Matrix.deep2([ + [0.0, 2, 0, 0], + [1.0, 0, 4, 0], + [0.0, 0, 0, 5], + [0.0, 3, 0, 6], + ]) -m := a.to_dense() + m := a.to_dense() -assert expected_matrix.equals(m) + assert expected_matrix.equals(m) +} diff --git a/examples/ml_kmeans/main.v b/examples/ml_kmeans/main.v index a6f753d66..cdb4ba63b 100644 --- a/examples/ml_kmeans/main.v +++ b/examples/ml_kmeans/main.v @@ -2,49 +2,51 @@ module main import vsl.ml -// data -mut data := ml.Data.from_raw_x([ - [0.1, 0.7], - [0.3, 0.7], - [0.1, 0.9], - [0.3, 0.9], - [0.7, 0.1], - [0.9, 0.1], - [0.7, 0.3], - [0.9, 0.3], -])! +fn main() { + // data + mut data := ml.Data.from_raw_x([ + [0.1, 0.7], + [0.3, 0.7], + [0.1, 0.9], + [0.3, 0.9], + [0.7, 0.1], + [0.9, 0.1], + [0.7, 0.3], + [0.9, 0.3], + ])! 
-// model -nb_classes := 2 -mut model := ml.Kmeans.new(mut data, nb_classes, 'kmeans') -model.set_centroids([ - // class 0 - [0.4, 0.6], - // class 1 - [0.6, 0.4], -]) + // model + nb_classes := 2 + mut model := ml.Kmeans.new(mut data, nb_classes, 'kmeans') + model.set_centroids([ + // class 0 + [0.4, 0.6], + // class 1 + [0.6, 0.4], + ]) -// initial classes -model.find_closest_centroids() + // initial classes + model.find_closest_centroids() -// initial computation of centroids -model.compute_centroids() + // initial computation of centroids + model.compute_centroids() -// train -model.train(epochs: 6) + // train + model.train(epochs: 6) -// test -expected_classes := [ - 0, - 0, - 0, - 0, - 1, - 1, - 1, - 1, -] -for i, c in model.classes { - assert c == expected_classes[i] - println('class ${i}: ${c}') + // test + expected_classes := [ + 0, + 0, + 0, + 0, + 1, + 1, + 1, + 1, + ] + for i, c in model.classes { + assert c == expected_classes[i] + println('class ${i}: ${c}') + } } diff --git a/examples/ml_kmeans_plot/main.v b/examples/ml_kmeans_plot/main.v index c0bcb3641..ef28fe1a9 100644 --- a/examples/ml_kmeans_plot/main.v +++ b/examples/ml_kmeans_plot/main.v @@ -3,30 +3,32 @@ module main import vsl.ml { Data, Kmeans } import internal.dataset -// data -x := dataset.raw_dataset.map([it[0], it[1]]) -mut data := Data.from_raw_x(x)! +fn main() { + // data + x := dataset.raw_dataset.map([it[0], it[1]]) + mut data := Data.from_raw_x(x)! -// model -nb_classes := 3 -mut model := Kmeans.new(mut data, nb_classes, 'kmeans') -model.set_centroids([ - // class 0 - [3.0, 3], - // class 1 - [6.0, 2], - // class 2 - [8.0, 5], -]) + // model + nb_classes := 3 + mut model := Kmeans.new(mut data, nb_classes, 'kmeans') + model.set_centroids([ + // class 0 + [3.0, 3], + // class 1 + [6.0, 2], + // class 2 + [8.0, 5], + ]) -// initial classes -model.find_closest_centroids() + // initial classes + model.find_closest_centroids() -// initial computation of centroids -model.compute_centroids() + // initial computation of centroids + model.compute_centroids() -// train -model.train(epochs: 6) + // train + model.train(epochs: 6) -// Plot the results using the new plot method -model.plot()! + // Plot the results using the new plot method + model.plot()! +} diff --git a/examples/ml_linreg01/main.v b/examples/ml_linreg01/main.v index 06ce7cd04..47981d407 100644 --- a/examples/ml_linreg01/main.v +++ b/examples/ml_linreg01/main.v @@ -3,31 +3,33 @@ module main // import vsl.float.float64 import vsl.ml -xy := [ - [0.99, 90.01], - [1.02, 89.05], - [1.15, 91.43], - [1.29, 93.74], - [1.46, 96.73], - [1.36, 94.45], - [0.87, 87.59], - [1.23, 91.77], - [1.55, 99.42], - [1.40, 93.65], - [1.19, 93.54], - [1.15, 92.52], - [0.98, 90.56], - [1.01, 89.54], - [1.11, 89.85], - [1.20, 90.39], - [1.26, 93.25], - [1.32, 93.41], - [1.43, 94.98], - [0.95, 87.33], -] -mut data := ml.Data.from_raw_xy(xy)! -mut reg := ml.LinReg.new(mut data, 'linear regression') +fn main() { + xy := [ + [0.99, 90.01], + [1.02, 89.05], + [1.15, 91.43], + [1.29, 93.74], + [1.46, 96.73], + [1.36, 94.45], + [0.87, 87.59], + [1.23, 91.77], + [1.55, 99.42], + [1.40, 93.65], + [1.19, 93.54], + [1.15, 92.52], + [0.98, 90.56], + [1.01, 89.54], + [1.11, 89.85], + [1.20, 90.39], + [1.26, 93.25], + [1.32, 93.41], + [1.43, 94.98], + [0.95, 87.33], + ] + mut data := ml.Data.from_raw_xy(xy)! 
+ mut reg := ml.LinReg.new(mut data, 'linear regression') -reg.train() -// TODO: Fix this test -// assert float64.tolerance(reg.cost(), 5.312454218805082e-01, 1e-15) + reg.train() + // TODO: Fix this test + // assert float64.tolerance(reg.cost(), 5.312454218805082e-01, 1e-15) +} diff --git a/examples/ml_linreg02/main.v b/examples/ml_linreg02/main.v index a35204023..1648de934 100644 --- a/examples/ml_linreg02/main.v +++ b/examples/ml_linreg02/main.v @@ -2,34 +2,36 @@ module main import vsl.ml -xy := [ - [0.99, 90.01], - [1.02, 89.05], - [1.15, 91.43], - [1.29, 93.74], - [1.46, 96.73], - [1.36, 94.45], - [0.87, 87.59], - [1.23, 91.77], - [1.55, 99.42], - [1.40, 93.65], - [1.19, 93.54], - [1.15, 92.52], - [0.98, 90.56], - [1.01, 89.54], - [1.11, 89.85], - [1.20, 90.39], - [1.26, 93.25], - [1.32, 93.41], - [1.43, 94.98], - [0.95, 87.33], -] -mut data := ml.Data.from_raw_xy(xy)! -mut reg := ml.LinReg.new(mut data, 'linear regression') +fn main() { + xy := [ + [0.99, 90.01], + [1.02, 89.05], + [1.15, 91.43], + [1.29, 93.74], + [1.46, 96.73], + [1.36, 94.45], + [0.87, 87.59], + [1.23, 91.77], + [1.55, 99.42], + [1.40, 93.65], + [1.19, 93.54], + [1.15, 92.52], + [0.98, 90.56], + [1.01, 89.54], + [1.11, 89.85], + [1.20, 90.39], + [1.26, 93.25], + [1.32, 93.41], + [1.43, 94.98], + [0.95, 87.33], + ] + mut data := ml.Data.from_raw_xy(xy)! + mut reg := ml.LinReg.new(mut data, 'linear regression') -reg.train() + reg.train() -for x0 in [0.8, 1.2, 2.0] { - pred := reg.predict([x0]) - println('x0: ${x0}, pred: ${pred}') + for x0 in [0.8, 1.2, 2.0] { + pred := reg.predict([x0]) + println('x0: ${x0}, pred: ${pred}') + } } diff --git a/examples/ml_sentiment_analysis/main.v b/examples/ml_sentiment_analysis/main.v index 480161853..6ce0aaf3e 100644 --- a/examples/ml_sentiment_analysis/main.v +++ b/examples/ml_sentiment_analysis/main.v @@ -12,201 +12,203 @@ const dataset = [ ['You are a bad person.', 'negative'], ] -// Arbitrary values. Since nlp.KNN only takes floats, we must represent -// our classes (positive or negative) as floats. -class := { - 'positive': 1.0 - 'negative': -1.0 -} - -class_inverse := { - '1.0': 'positive' - '-1.0': 'negative' -} - -// Now we want to convert our dataset to a numeric representation. -// We can do this in two ways: using bag of words (count vectorizer) -// or with TF-IDF. We must, however, prepare our data first. - -// Some words such as "do", "am", "are" are useless in our context, -// and they are included in VSL's default stopword list. We must remove -// those stopwords because they don't mean anything for us when trying -// to analyze sentiments. Punctuation is also not needed, so we will be -// using nlp.remove_punctuation and set everything to lowercase. However, -// we must first tokenize our sentence (divide it into single words). -// It is not complicated at all, see below. Let's also convert our classes -// to the numerical representation. -mut tokens := [][]string{} -mut labels := []f64{} -for row in dataset { - tokenized := nlp.tokenize(nlp.remove_punctuation(row[0]).to_lower()) - - // To specify custom stopwords for your language, run: - // nlp.remove_stopwords(tokens []string, stopwords []string, ignore_case bool). - tokens << nlp.remove_stopwords_en(tokenized, true) // Last parameter is a flag to set everything to lowercase. If set to false, case is kept. - labels << class[row[1]] -} - -// After that, an important step will be stemming. What does that do? 
-// Well, stemming is keeping the radicals of words so that terms such -// as "try", "tried" and "trying" are considered the same token: "try". -// Let's create a stemmer: -mut lancaster := nlp.LancasterStemmer.new(true) // Parameter is strip_prefix. If true, "kilogram" becomes "gram", for example. - -// List of sentences as ngrams, read the comments -// below to understand. -mut ngrams := [][][]string{} - -mut all_ngrams := [][]string{} // Will be used later. -for i in 0 .. dataset.len { - // The map function is an array operation. - // Refer to https://github.com/vlang/v/blob/master/doc/docs.md#array-operations - mut stemmed := []string{} - for token in tokens[i] { - // There is an option/result below because the LancasterStemmer - // accepts custom rules (which we will not be getting into), and - // it makes sure they are valid. For this example, the code below - // should NOT throw errors. - stemmed << lancaster.stem(token)! +fn main() { + // Arbitrary values. Since nlp.KNN only takes floats, we must represent + // our classes (positive or negative) as floats. + class := { + 'positive': 1.0 + 'negative': -1.0 } - // Now we need to extract ngrams. Since VSL provides ngram support, - // it is important that we only use ngrams (even if n = 1) due to V - // limitations. If you don't know it already, ngrams are groups of - // n tokens. Here is an example: - // 2grams from ['hello', 'my', 'friend'] = [['hello', 'my'], ['my', 'friend']] - // Since in our dataset the previous token doesn't matter ('good' and - // 'bad' are the only decisive factors), our n is going to be 1. - // For the example above, 1grams would be [['hello'], ['my'], ['friend']]. - // The option/result below is in case you pass negative values to nlp.ngrams. - ng := nlp.ngrams(stemmed, 1)! // [][]string - ngrams << ng // appends [][]string to ngrams ([][][]string) - all_ngrams << ng // extends all_ngrams([][]string) by [][]string -} + class_inverse := { + '1.0': 'positive' + '-1.0': 'negative' + } -// Now we must choose a method for sentiment analysis: bag of words or TF-IDF. -// We will be working with both for a more indepth example! First, let's do -// bag of words. It is implemented using nlp.most_frequent_ngrams and nlp.count_vectorize. -// We need to specify a number of features (that is, how many ngrams will be -// returned by nlp.most_frequent_ngrams). Since in this case the decisive tokens -// in our dataset are "good" and "bad", lets go for a low number: 5. This number -// should be higher if you use ngrams > 1 and have more data. -n_features := 5 -mut most_freq := nlp.most_frequent_ngrams(all_ngrams, n_features)! // [][]string - -// Now, for each sentence, we will get the array of number of occurrences -// for each ngram in it. Sounds confusing? Well, here's an example: -// Say your most frequent 1grams are [['good'], ['bad']] and the number of -// features is two. The count_vectorize of the sentence (as 1grams): -// [['you'], ['are'], ['a'], ['good'], ['person'], ['and'], ['good'], ['looking']] -// would be [2, 0] because there are 2 instances of ['good'] and 0 of ['bad']. -mut vectorized := [][]f64{} -for i in 0 .. dataset.len { - // We have to use array.map() to cast all values from int (original - // return type of nlp.count_vectorize) to f64, to feed to our KNN model - // in the next steps. - vectorized << nlp.count_vectorize(ngrams[i], most_freq).map(f64(it)) -} + // Now we want to convert our dataset to a numeric representation. 
+ // We can do this in two ways: using bag of words (count vectorizer) + // or with TF-IDF. We must, however, prepare our data first. + + // Some words such as "do", "am", "are" are useless in our context, + // and they are included in VSL's default stopword list. We must remove + // those stopwords because they don't mean anything for us when trying + // to analyze sentiments. Punctuation is also not needed, so we will be + // using nlp.remove_punctuation and set everything to lowercase. However, + // we must first tokenize our sentence (divide it into single words). + // It is not complicated at all, see below. Let's also convert our classes + // to the numerical representation. + mut tokens := [][]string{} + mut labels := []f64{} + for row in dataset { + tokenized := nlp.tokenize(nlp.remove_punctuation(row[0]).to_lower()) + + // To specify custom stopwords for your language, run: + // nlp.remove_stopwords(tokens []string, stopwords []string, ignore_case bool). + tokens << nlp.remove_stopwords_en(tokenized, true) // Last parameter is a flag to set everything to lowercase. If set to false, case is kept. + labels << class[row[1]] + } -// Amazing! We have all we need to train a sentiment analysis model with -// bag of words. Check it out: -mut training_data := ml.Data.from_raw_xy_sep(vectorized, labels)! -mut bow_knn := ml.KNN.new(mut training_data, 'BagOfWordsKNN')! - -sentence1 := 'I think today is a good day' // should be positive -sentence2 := 'I hate grape juice, it tastes bad.' // should be negative - -// In order to predict them, we have to do the same we did for all -// our training samples: tokenize, stem and ngramize (does that term -// even exist?) -bow := fn (sent string, mut lan nlp.LancasterStemmer, mf [][]string) ![]f64 { - sent_tokenized := nlp.remove_stopwords_en(nlp.tokenize(nlp.remove_punctuation(sent).to_lower()), - true) - mut sent_stemmed := []string{} - for tok in sent_tokenized { - sent_stemmed << lan.stem(tok)! + // After that, an important step will be stemming. What does that do? + // Well, stemming is keeping the radicals of words so that terms such + // as "try", "tried" and "trying" are considered the same token: "try". + // Let's create a stemmer: + mut lancaster := nlp.LancasterStemmer.new(true) // Parameter is strip_prefix. If true, "kilogram" becomes "gram", for example. + + // List of sentences as ngrams, read the comments + // below to understand. + mut ngrams := [][][]string{} + + mut all_ngrams := [][]string{} // Will be used later. + for i in 0 .. dataset.len { + // The map function is an array operation. + // Refer to https://github.com/vlang/v/blob/master/doc/docs.md#array-operations + mut stemmed := []string{} + for token in tokens[i] { + // There is an option/result below because the LancasterStemmer + // accepts custom rules (which we will not be getting into), and + // it makes sure they are valid. For this example, the code below + // should NOT throw errors. + stemmed << lancaster.stem(token)! + } + + // Now we need to extract ngrams. Since VSL provides ngram support, + // it is important that we only use ngrams (even if n = 1) due to V + // limitations. If you don't know it already, ngrams are groups of + // n tokens. Here is an example: + // 2grams from ['hello', 'my', 'friend'] = [['hello', 'my'], ['my', 'friend']] + // Since in our dataset the previous token doesn't matter ('good' and + // 'bad' are the only decisive factors), our n is going to be 1. + // For the example above, 1grams would be [['hello'], ['my'], ['friend']]. 
+ // The option/result below is in case you pass negative values to nlp.ngrams. + ng := nlp.ngrams(stemmed, 1)! // [][]string + ngrams << ng // appends [][]string to ngrams ([][][]string) + all_ngrams << ng // extends all_ngrams([][]string) by [][]string } - sent_ngrams := nlp.ngrams(sent_stemmed, 1)! - return nlp.count_vectorize(sent_ngrams, mf).map(f64(it)) -} -bow_prediction1 := bow_knn.predict( - k: 2 - // low value due to small dataset - to_pred: bow(sentence1, mut lancaster, most_freq)! -)! -bow_prediction2 := bow_knn.predict( - k: 2 - // low value due to small dataset - to_pred: bow(sentence2, mut lancaster, most_freq)! -)! - -// Convert from numeric representation to text: -// 1.0: positive, -1.0: negative. -bow_label1 := class_inverse[bow_prediction1.str()] -bow_label2 := class_inverse[bow_prediction2.str()] - -println('"${sentence1}" predicted as "${bow_label1}" using bag of words.') -println('"${sentence2}" predicted as "${bow_label2}" using bag of words.') - -// Now let's use TF-IDF and see if we get something different. -// It takes a document (an array of sentences, which by itself is an array -// of ngrams, so [][][]string), which in this case is the variable `ngrams`. -mut unique_ngrams := [][]string{} -for ng in all_ngrams { - if ng !in unique_ngrams { - unique_ngrams << ng + // Now we must choose a method for sentiment analysis: bag of words or TF-IDF. + // We will be working with both for a more indepth example! First, let's do + // bag of words. It is implemented using nlp.most_frequent_ngrams and nlp.count_vectorize. + // We need to specify a number of features (that is, how many ngrams will be + // returned by nlp.most_frequent_ngrams). Since in this case the decisive tokens + // in our dataset are "good" and "bad", lets go for a low number: 5. This number + // should be higher if you use ngrams > 1 and have more data. + n_features := 5 + mut most_freq := nlp.most_frequent_ngrams(all_ngrams, n_features)! // [][]string + + // Now, for each sentence, we will get the array of number of occurrences + // for each ngram in it. Sounds confusing? Well, here's an example: + // Say your most frequent 1grams are [['good'], ['bad']] and the number of + // features is two. The count_vectorize of the sentence (as 1grams): + // [['you'], ['are'], ['a'], ['good'], ['person'], ['and'], ['good'], ['looking']] + // would be [2, 0] because there are 2 instances of ['good'] and 0 of ['bad']. + mut vectorized := [][]f64{} + for i in 0 .. dataset.len { + // We have to use array.map() to cast all values from int (original + // return type of nlp.count_vectorize) to f64, to feed to our KNN model + // in the next steps. + vectorized << nlp.count_vectorize(ngrams[i], most_freq).map(f64(it)) } -} -// Number of features will be the number of unique ngrams, just -// for the sake it. You can truncate and pad to a fixed length -// if you want to, but let's do it this other way to have somewhat -// complete sentence information. -mut tf_idf_rows := [][]f64{} - -// Remember, `ngrams` is the ngram representation of each sentence -// in our dataset. -for sent in ngrams { - mut tf_idf_sentence := []f64{} - for u_ng in unique_ngrams { - tf_idf_sentence << nlp.tf_idf(u_ng, sent, ngrams)! + // Amazing! We have all we need to train a sentiment analysis model with + // bag of words. Check it out: + mut training_data := ml.Data.from_raw_xy_sep(vectorized, labels)! + mut bow_knn := ml.KNN.new(mut training_data, 'BagOfWordsKNN')! 
+ + sentence1 := 'I think today is a good day' // should be positive + sentence2 := 'I hate grape juice, it tastes bad.' // should be negative + + // In order to predict them, we have to do the same we did for all + // our training samples: tokenize, stem and ngramize (does that term + // even exist?) + bow := fn (sent string, mut lan nlp.LancasterStemmer, mf [][]string) ![]f64 { + sent_tokenized := nlp.remove_stopwords_en(nlp.tokenize(nlp.remove_punctuation(sent).to_lower()), + true) + mut sent_stemmed := []string{} + for tok in sent_tokenized { + sent_stemmed << lan.stem(tok)! + } + sent_ngrams := nlp.ngrams(sent_stemmed, 1)! + return nlp.count_vectorize(sent_ngrams, mf).map(f64(it)) } - tf_idf_rows << tf_idf_sentence -} -training_data = ml.Data.from_raw_xy_sep(tf_idf_rows, labels)! -mut tf_idf_knn := ml.KNN.new(mut training_data, 'TfIdfKNN')! + bow_prediction1 := bow_knn.predict( + k: 2 + // low value due to small dataset + to_pred: bow(sentence1, mut lancaster, most_freq)! + )! + bow_prediction2 := bow_knn.predict( + k: 2 + // low value due to small dataset + to_pred: bow(sentence2, mut lancaster, most_freq)! + )! + + // Convert from numeric representation to text: + // 1.0: positive, -1.0: negative. + bow_label1 := class_inverse[bow_prediction1.str()] + bow_label2 := class_inverse[bow_prediction2.str()] + + println('"${sentence1}" predicted as "${bow_label1}" using bag of words.') + println('"${sentence2}" predicted as "${bow_label2}" using bag of words.') + + // Now let's use TF-IDF and see if we get something different. + // It takes a document (an array of sentences, which by itself is an array + // of ngrams, so [][][]string), which in this case is the variable `ngrams`. + mut unique_ngrams := [][]string{} + for ng in all_ngrams { + if ng !in unique_ngrams { + unique_ngrams << ng + } + } -tfidf := fn (sent string, mut lan nlp.LancasterStemmer, document [][][]string, unique [][]string) ![]f64 { - sent_tokenized := nlp.remove_stopwords_en(nlp.tokenize(nlp.remove_punctuation(sent).to_lower()), - true) - mut sent_stemmed := []string{} - for tok in sent_tokenized { - sent_stemmed << lan.stem(tok)! + // Number of features will be the number of unique ngrams, just + // for the sake it. You can truncate and pad to a fixed length + // if you want to, but let's do it this other way to have somewhat + // complete sentence information. + mut tf_idf_rows := [][]f64{} + + // Remember, `ngrams` is the ngram representation of each sentence + // in our dataset. + for sent in ngrams { + mut tf_idf_sentence := []f64{} + for u_ng in unique_ngrams { + tf_idf_sentence << nlp.tf_idf(u_ng, sent, ngrams)! + } + tf_idf_rows << tf_idf_sentence } - mut sent_ng := nlp.ngrams(sent_stemmed, 1)! - mut tf_idf_sentence := []f64{} - for u_ng in unique { - tf_idf_sentence << nlp.tf_idf(u_ng, sent_ng, document)! + + training_data = ml.Data.from_raw_xy_sep(tf_idf_rows, labels)! + mut tf_idf_knn := ml.KNN.new(mut training_data, 'TfIdfKNN')! + + tfidf := fn (sent string, mut lan nlp.LancasterStemmer, document [][][]string, unique [][]string) ![]f64 { + sent_tokenized := nlp.remove_stopwords_en(nlp.tokenize(nlp.remove_punctuation(sent).to_lower()), + true) + mut sent_stemmed := []string{} + for tok in sent_tokenized { + sent_stemmed << lan.stem(tok)! + } + mut sent_ng := nlp.ngrams(sent_stemmed, 1)! + mut tf_idf_sentence := []f64{} + for u_ng in unique { + tf_idf_sentence << nlp.tf_idf(u_ng, sent_ng, document)! 
+ } + return tf_idf_sentence } - return tf_idf_sentence -} -tf_idf_prediction1 := tf_idf_knn.predict( - k: 2 - // low value due to small dataset - to_pred: tfidf(sentence1, mut lancaster, ngrams, unique_ngrams)! -)! + tf_idf_prediction1 := tf_idf_knn.predict( + k: 2 + // low value due to small dataset + to_pred: tfidf(sentence1, mut lancaster, ngrams, unique_ngrams)! + )! -tf_idf_prediction2 := tf_idf_knn.predict( - k: 2 - // low value due to small dataset - to_pred: tfidf(sentence2, mut lancaster, ngrams, unique_ngrams)! -)! + tf_idf_prediction2 := tf_idf_knn.predict( + k: 2 + // low value due to small dataset + to_pred: tfidf(sentence2, mut lancaster, ngrams, unique_ngrams)! + )! -tf_idf_label1 := class_inverse[tf_idf_prediction1.str()] -tf_idf_label2 := class_inverse[tf_idf_prediction2.str()] + tf_idf_label1 := class_inverse[tf_idf_prediction1.str()] + tf_idf_label2 := class_inverse[tf_idf_prediction2.str()] -println('"${sentence1}" predicted as "${tf_idf_label1}" using TF-IDF.') -println('"${sentence2}" predicted as "${tf_idf_label2}" using TF-IDF.') + println('"${sentence1}" predicted as "${tf_idf_label1}" using TF-IDF.') + println('"${sentence2}" predicted as "${tf_idf_label2}" using TF-IDF.') +} diff --git a/examples/mpi_basic_example/main.v b/examples/mpi_basic_example/main.v index c5b7ddd72..39a9a44a3 100644 --- a/examples/mpi_basic_example/main.v +++ b/examples/mpi_basic_example/main.v @@ -3,45 +3,47 @@ module main import vsl.float.float64 import vsl.mpi -mpi.initialize()! - -defer { - mpi.finalize() -} - -if mpi.world_rank() == 0 { - println('Test MPI 01') -} - -println('Hello from rank ${mpi.world_rank()}') -println('The world has ${mpi.world_size()} processes') - -n := 11 -mut x := []f64{len: n} -id, sz := mpi.world_rank(), mpi.world_size() -start, endp1 := (id * n) / sz, ((id + 1) * n) / sz -for i := start; i < endp1; i++ { - x[i] = f64(i) -} - -// Communicator -comm := mpi.Communicator.new([])! - -// Barrier -comm.barrier() - -// sum to root -mut r := []f64{len: n} -comm.reduce_sum_f64(mut r, x) -if id == 0 { - assertion := float64.arrays_tolerance(r, []f64{len: n, init: index}, 1e-17) - println('ID: ${id} - Assertion: ${assertion}') -} else { - assertion := float64.arrays_tolerance(r, []f64{len: n}, 1e-17) +fn main() { + mpi.initialize()! + + defer { + mpi.finalize() + } + + if mpi.world_rank() == 0 { + println('Test MPI 01') + } + + println('Hello from rank ${mpi.world_rank()}') + println('The world has ${mpi.world_size()} processes') + + n := 11 + mut x := []f64{len: n} + id, sz := mpi.world_rank(), mpi.world_size() + start, endp1 := (id * n) / sz, ((id + 1) * n) / sz + for i := start; i < endp1; i++ { + x[i] = f64(i) + } + + // Communicator + comm := mpi.Communicator.new([])! 
+ + // Barrier + comm.barrier() + + // sum to root + mut r := []f64{len: n} + comm.reduce_sum_f64(mut r, x) + if id == 0 { + assertion := float64.arrays_tolerance(r, []f64{len: n, init: index}, 1e-17) + println('ID: ${id} - Assertion: ${assertion}') + } else { + assertion := float64.arrays_tolerance(r, []f64{len: n}, 1e-17) + println('ID: ${id} - Assertion: ${assertion}') + } + + r[0] = 123.0 + comm.bcast_from_root_f64(r) + assertion := float64.arrays_tolerance(r, [123.0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 1e-17) println('ID: ${id} - Assertion: ${assertion}') } - -r[0] = 123.0 -comm.bcast_from_root_f64(r) -assertion := float64.arrays_tolerance(r, [123.0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 1e-17) -println('ID: ${id} - Assertion: ${assertion}') diff --git a/examples/mpi_bcast_example/main.v b/examples/mpi_bcast_example/main.v index c5b7ddd72..39a9a44a3 100644 --- a/examples/mpi_bcast_example/main.v +++ b/examples/mpi_bcast_example/main.v @@ -3,45 +3,47 @@ module main import vsl.float.float64 import vsl.mpi -mpi.initialize()! - -defer { - mpi.finalize() -} - -if mpi.world_rank() == 0 { - println('Test MPI 01') -} - -println('Hello from rank ${mpi.world_rank()}') -println('The world has ${mpi.world_size()} processes') - -n := 11 -mut x := []f64{len: n} -id, sz := mpi.world_rank(), mpi.world_size() -start, endp1 := (id * n) / sz, ((id + 1) * n) / sz -for i := start; i < endp1; i++ { - x[i] = f64(i) -} - -// Communicator -comm := mpi.Communicator.new([])! - -// Barrier -comm.barrier() - -// sum to root -mut r := []f64{len: n} -comm.reduce_sum_f64(mut r, x) -if id == 0 { - assertion := float64.arrays_tolerance(r, []f64{len: n, init: index}, 1e-17) - println('ID: ${id} - Assertion: ${assertion}') -} else { - assertion := float64.arrays_tolerance(r, []f64{len: n}, 1e-17) +fn main() { + mpi.initialize()! + + defer { + mpi.finalize() + } + + if mpi.world_rank() == 0 { + println('Test MPI 01') + } + + println('Hello from rank ${mpi.world_rank()}') + println('The world has ${mpi.world_size()} processes') + + n := 11 + mut x := []f64{len: n} + id, sz := mpi.world_rank(), mpi.world_size() + start, endp1 := (id * n) / sz, ((id + 1) * n) / sz + for i := start; i < endp1; i++ { + x[i] = f64(i) + } + + // Communicator + comm := mpi.Communicator.new([])! 
+ + // Barrier + comm.barrier() + + // sum to root + mut r := []f64{len: n} + comm.reduce_sum_f64(mut r, x) + if id == 0 { + assertion := float64.arrays_tolerance(r, []f64{len: n, init: index}, 1e-17) + println('ID: ${id} - Assertion: ${assertion}') + } else { + assertion := float64.arrays_tolerance(r, []f64{len: n}, 1e-17) + println('ID: ${id} - Assertion: ${assertion}') + } + + r[0] = 123.0 + comm.bcast_from_root_f64(r) + assertion := float64.arrays_tolerance(r, [123.0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 1e-17) println('ID: ${id} - Assertion: ${assertion}') } - -r[0] = 123.0 -comm.bcast_from_root_f64(r) -assertion := float64.arrays_tolerance(r, [123.0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 1e-17) -println('ID: ${id} - Assertion: ${assertion}') diff --git a/examples/plot_basic_heatmap/main.v b/examples/plot_basic_heatmap/main.v index c0eb735f8..2a53bebc5 100644 --- a/examples/plot_basic_heatmap/main.v +++ b/examples/plot_basic_heatmap/main.v @@ -2,21 +2,23 @@ module main import vsl.plot -z := [[1.0, 0, 30, 50, 1], [20.0, 1, 60, 80, 30], [30.0, 60, 1, -10, 20]] -x := ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday'] -y := ['Morning', 'Afternoon', 'Evening'] +fn main() { + z := [[1.0, 0, 30, 50, 1], [20.0, 1, 60, 80, 30], [30.0, 60, 1, -10, 20]] + x := ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday'] + y := ['Morning', 'Afternoon', 'Evening'] -mut plt := plot.Plot.new() + mut plt := plot.Plot.new() -plt.heatmap( - x: x - y: y - z: z -) -plt.layout( - title: 'Heatmap Basic Implementation' - width: 750 - height: 750 -) + plt.heatmap( + x: x + y: y + z: z + ) + plt.layout( + title: 'Heatmap Basic Implementation' + width: 750 + height: 750 + ) -plt.show()! + plt.show()! +} diff --git a/examples/plot_histogram/main.v b/examples/plot_histogram/main.v index 6d0eb0c09..180ed7307 100644 --- a/examples/plot_histogram/main.v +++ b/examples/plot_histogram/main.v @@ -3,22 +3,24 @@ module main import rand import vsl.plot -rand.seed([u32(1), 42]) +fn main() { + rand.seed([u32(1), 42]) -x := []f64{len: 1000, init: (0 * index) + rand.f64n(100) or { 0 }} + x := []f64{len: 1000, init: (0 * index) + rand.f64n(100) or { 0 }} -mut plt := plot.Plot.new() -plt.histogram( - x: x - xbins: plot.Bins{ - start: 0.0 - end: 100.0 - size: 2 - } -) -plt.layout( - title: 'Histogram Example' - width: 750 - height: 750 -) -plt.show()! + mut plt := plot.Plot.new() + plt.histogram( + x: x + xbins: plot.Bins{ + start: 0.0 + end: 100.0 + size: 2 + } + ) + plt.layout( + title: 'Histogram Example' + width: 750 + height: 750 + ) + plt.show()! +} diff --git a/examples/plot_scatter/main.v b/examples/plot_scatter/main.v index 95ed1423c..cecbf902a 100644 --- a/examples/plot_scatter/main.v +++ b/examples/plot_scatter/main.v @@ -3,37 +3,39 @@ module main import vsl.plot import vsl.util -y := [ - 0.0, - 1, - 3, - 1, - 0, - -1, - -3, - -1, - 0, - 1, - 3, - 1, - 0, -] -x := util.arange(y.len) +fn main() { + y := [ + 0.0, + 1, + 3, + 1, + 0, + -1, + -3, + -1, + 0, + 1, + 3, + 1, + 0, + ] + x := util.arange(y.len) -mut plt := plot.Plot.new() -plt.scatter( - x: x - y: y - mode: 'lines+markers' - marker: plot.Marker{ - size: []f64{len: x.len, init: 10.0} - color: []string{len: x.len, init: '#FF0000'} - } - line: plot.Line{ - color: '#FF0000' - } -) -plt.layout( - title: 'Scatter plot example' -) -plt.show()! 
+ mut plt := plot.Plot.new() + plt.scatter( + x: x + y: y + mode: 'lines+markers' + marker: plot.Marker{ + size: []f64{len: x.len, init: 10.0} + color: []string{len: x.len, init: '#FF0000'} + } + line: plot.Line{ + color: '#FF0000' + } + ) + plt.layout( + title: 'Scatter plot example' + ) + plt.show()! +} diff --git a/examples/plot_scatter_colorscale/main.v b/examples/plot_scatter_colorscale/main.v index a975d3cd2..83d8cd0ee 100644 --- a/examples/plot_scatter_colorscale/main.v +++ b/examples/plot_scatter_colorscale/main.v @@ -3,34 +3,36 @@ module main import vsl.plot import vsl.util -y := [ - 0.0, - 1, - 3, - 1, - 0, - -1, - -3, - -1, - 0, - 1, - 3, - 1, - 0, -] -x := util.arange(y.len) +fn main() { + y := [ + 0.0, + 1, + 3, + 1, + 0, + -1, + -3, + -1, + 0, + 1, + 3, + 1, + 0, + ] + x := util.arange(y.len) -mut plt := plot.Plot.new() -plt.scatter( - x: x - y: y - mode: 'lines+markers' - colorscale: 'smoker' - marker: plot.Marker{ - size: []f64{len: x.len, init: 10.0} - } -) -plt.layout( - title: 'Scatter plot example' -) -plt.show()! + mut plt := plot.Plot.new() + plt.scatter( + x: x + y: y + mode: 'lines+markers' + colorscale: 'smoker' + marker: plot.Marker{ + size: []f64{len: x.len, init: 10.0} + } + ) + plt.layout( + title: 'Scatter plot example' + ) + plt.show()! +} diff --git a/examples/prime_factorization/main.v b/examples/prime_factorization/main.v index 49b43a936..a18891a5e 100644 --- a/examples/prime_factorization/main.v +++ b/examples/prime_factorization/main.v @@ -1,4 +1,3 @@ -// A simple prime factorizer module main import vsl.prime diff --git a/examples/roots_bisection_solver/main.v b/examples/roots_bisection_solver/main.v index 3e83fd304..56efc25cb 100644 --- a/examples/roots_bisection_solver/main.v +++ b/examples/roots_bisection_solver/main.v @@ -17,45 +17,47 @@ fn f_cos(x f64, _ []f64) f64 { return math.cos(x) } -f := func.Fn.new(f: f_cos) - -mut solver := roots.Bisection.new(f) - -solver.xmin = 0.0 -solver.xmax = 3.0 -solver.epsabs = epsabs -solver.epsrel = epsrel -solver.n_max = n_max - -result := solver.solve()? - -expected := math.pi / 2.0 -assert float64.soclose(result.x, expected, solver.epsabs) - -println('x = ${result.x}') - -mut plt := plot.Plot.new() - -x := util.lin_space(0.0, 3.0, 100) -y := x.map(f_cos(it, []f64{})) - -plt.scatter( - x: x - y: y - mode: 'lines' - line: plot.Line{ - color: '#FF0000' - } -) -plt.scatter( - x: [result.x] - y: [result.fx] - mode: 'markers' - marker: plot.Marker{ - color: ['#0000FF'] - } -) -plt.layout( - title: 'cos(x)' -) -plt.show()! +fn main() { + f := func.Fn.new(f: f_cos) + + mut solver := roots.Bisection.new(f) + + solver.xmin = 0.0 + solver.xmax = 3.0 + solver.epsabs = epsabs + solver.epsrel = epsrel + solver.n_max = n_max + + result := solver.solve()? + + expected := math.pi / 2.0 + assert float64.soclose(result.x, expected, solver.epsabs) + + println('x = ${result.x}') + + mut plt := plot.Plot.new() + + x := util.lin_space(0.0, 3.0, 100) + y := x.map(f_cos(it, []f64{})) + + plt.scatter( + x: x + y: y + mode: 'lines' + line: plot.Line{ + color: '#FF0000' + } + ) + plt.scatter( + x: [result.x] + y: [result.fx] + mode: 'markers' + marker: plot.Marker{ + color: ['#0000FF'] + } + ) + plt.layout( + title: 'cos(x)' + ) + plt.show()! 
+} diff --git a/examples/vcl_opencl_basic/main.v b/examples/vcl_opencl_basic/main.v index 8b3fb14b5..26830fe5a 100644 --- a/examples/vcl_opencl_basic/main.v +++ b/examples/vcl_opencl_basic/main.v @@ -9,43 +9,45 @@ __kernel void addOne(__global float* data) { data[i] += 1; }' -// get all devices if you want -devices := vcl.get_devices(vcl.DeviceType.cpu)! -println('Devices: ${devices}') - -// do not create platforms/devices/contexts/queues/... -// just get the device -mut device := vcl.get_default_device()! -defer { - device.release() or { panic(err) } +fn main() { + // get all devices if you want + devices := vcl.get_devices(vcl.DeviceType.cpu)! + println('Devices: ${devices}') + + // do not create platforms/devices/contexts/queues/... + // just get the device + mut device := vcl.get_default_device()! + defer { + device.release() or { panic(err) } + } + + // VCL has several kinds of device memory object: Bytes, Vector, Image (Soon) + // allocate buffer on the device (16 elems of f32). + mut v := device.vector[f32](16)! + defer { + v.release() or { panic(err) } + } + + // load data to the vector (it's async) + data := [f32(0.0), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + err := <-v.load(data) + if err !is none { + panic(err) + } + println('\n\nCreated vector: ${v}') + println(v.data()!) + + // add program source to device, get kernel + device.add_program(kernel_source)! + k := device.kernel('addOne')! + // run kernel (global work size 16 and local work size 1) + kernel_err := <-k.global(16).local(1).run(v) + if kernel_err !is none { + panic(kernel_err) + } + + // get data from vector + next_data := v.data()! + // prints out [1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16] + println('\n\nUpdated vector data: ${next_data}') } - -// VCL has several kinds of device memory object: Bytes, Vector, Image (Soon) -// allocate buffer on the device (16 elems of f32). -mut v := device.vector[f32](16)! -defer { - v.release() or { panic(err) } -} - -// load data to the vector (it's async) -data := [f32(0.0), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] -err := <-v.load(data) -if err !is none { - panic(err) -} -println('\n\nCreated vector: ${v}') -println(v.data()!) - -// add program source to device, get kernel -device.add_program(kernel_source)! -k := device.kernel('addOne')! -// run kernel (global work size 16 and local work size 1) -kernel_err := <-k.global(16).local(1).run(v) -if kernel_err !is none { - panic(kernel_err) -} - -// get data from vector -next_data := v.data()! -// prints out [1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16] -println('\n\nUpdated vector data: ${next_data}') diff --git a/plot/show.v b/plot/show.v index 831424830..f329e980c 100644 --- a/plot/show.v +++ b/plot/show.v @@ -8,9 +8,13 @@ import os import time // PlotConfig is a configuration for the Plotly plot. +// It includes the configuration for the web server that serves the plot. [params] pub struct PlotConfig { + net.ListenOptions + timeout time.Duration = 1 * time.second use_cdn bool + saddr string = ':0' } // show starts a web server and opens a browser window to display the plot. @@ -22,9 +26,12 @@ pub fn (p Plot) show(config PlotConfig) ! { use_cdn: true plot: p } - listener := net.listen_tcp(net.AddrFamily.ip, ':0')! + listener := net.listen_tcp(net.AddrFamily.ip, config.saddr, + dualstack: config.dualstack + backlog: config.backlog + )! mut server := &http.Server{ - accept_timeout: 1 * time.second + accept_timeout: config.timeout listener: listener handler: handler }
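
Usage sketch (V, illustrative): the reworked plot.show now accepts a PlotConfig that embeds net.ListenOptions and exposes saddr (default ':0'), timeout (default 1 * time.second) and use_cdn, so a caller can pin the serving address instead of getting a random free port. The sketch below assumes only the fields visible in the plot/show.v hunk above; the ':8080' address and 5-second timeout are made-up example values, not defaults.

module main

import time
import vsl.plot
import vsl.util

fn main() {
	y := [0.0, 1, 4, 9, 16, 25]
	x := util.arange(y.len)

	mut plt := plot.Plot.new()
	plt.scatter(x: x, y: y, mode: 'lines+markers')
	plt.layout(title: 'show() with a custom PlotConfig')

	// Bind the plot's web server to a fixed local port instead of the default
	// ':0' (any free port) and allow a longer accept timeout for the browser.
	// ':8080' and 5 * time.second are illustrative values only.
	plt.show(saddr: ':8080', timeout: 5 * time.second)!
}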