Sequential programming for node.js, end of callback hell.
Simple, straightforward abstraction over Fibers.
By using wait.for, you can call any Node.js standard async function in sequential/sync mode, waiting for the result data without blocking node's event loop (thanks to fibers).
A Node.js standard async function is a function whose last parameter is a callback: function(err,data)
Advantages:
##NEWS
###Aug-2013 - Wait.for-ES6 based on ES6-generators
I've developed a version based on JavaScript's upcoming ES6-Harmony generators. It's not based on node-fibers. Surprisingly, the ES6-based implementation of wait.for(asyncFn) is almost a no-op; you can even completely omit it. Warning: bleeding edge. Check [Wait.for-ES6](https://github.com/luciotato/waitfor-ES6)
npm install wait.for
You need to be in a Fiber to be able to use wait.for. The ideal place to launch a fiber is when a request arrives, to handle it:
var server = http.createServer(
function(req, res){
console.log('req!');
wait.launchFiber(handler,req,res); //handle in a fiber, keep node spinning
}).listen(8000);
Then, at function handler(req,res) and in every function you call from there, you'll be able to use wait.for(asyncFn,...
var wait = require('wait.for');
function anyStandardAsync(param, callback){
setTimeout( function(){
callback(null,'hi '+param);
}, 5000);
};
function testFunction(){
console.log('fiber start');
var result = wait.for(anyStandardAsync,'test');
console.log('function returned:', result);
console.log('fiber end');
};
console.log('app start');
wait.launchFiber(testFunction);
console.log('after launch');
var wait = require('wait.for');
var fs = require('fs');
var express = require('express');
var app = express();
// in a Fiber
function handleGet(req, res){
res.send( wait.for(fs.readFile,'largeFile.html') );
}
app.get('/', function(req,res){
wait.launchFiber(handleGet, req, res); //handle in a fiber, keep node spinning
});
app.listen(3000);
see cradle example
var wait=require('wait.for');
// launch a new fiber
wait.launchFiber(my_sequential_function, arg,arg,...)
// in a fiber.. We can wait for async functions
function my_sequential_function(arg,arg...){
// call async_function(arg1), wait for result, return data
var myObj = wait.for(async_function, arg1);
// call myObj.queryData(arg1,arg2), wait for result, return data
var myObjData = wait.forMethod(myObj,'queryData', arg1, arg2);
console.log(myObjData.toString());
}
##Notes on non-standard callbacks. e.g.: connection.query from mysql, database.prepare on node-sqlite3
wait.for expects standardized callbacks. A standardized callback always returns (err,data) in that order.
A solution for the sql.query method and other non-standard callbacks is to create a wrapper function standardizing the callback, e.g.:
connection.q = function(sql, params, stdCallback){
this.query(sql,params, function(err,rows,columns){
return stdCallback(err,{rows:rows,columns:columns});
});
}
usage:
try {
var result = wait.forMethod(connection, "q", options.sql, options.params);
console.log(result.rows);
console.log(result.columns);
}
catch(err) {
console.log(err);
}
e.g.: node-sqlite3's database.prepare
var sqlite3 = require('sqlite3').verbose();
var db = new sqlite3.Database(':memory:');
db.prep = function(sql, stdCallback){
var stmt = this.prepare(sql, function(err){
return stdCallback(err, stmt);
});
}
var stmt = wait.forMethod (db, 'prep', "INSERT OR REPLACE INTO foo (a,b,c) VALUES (?,?,?)");
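From there, the same trick drives the prepared statement, since stmt.run and db.all also take standard (err, data)-style callbacks as their last argument. A small hedged sketch, assuming the foo table exists and the code runs inside a fiber:

```javascript
// run the prepared statement, then read the rows back (inside a fiber)
wait.forMethod(stmt, 'run', 1, 2, 3);                      // stmt.run(params..., cb(err))
var rows = wait.forMethod(db, 'all', "SELECT * FROM foo"); // db.all(sql, cb(err, rows))
console.log(rows);
```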
DNS testing, using pure node.js (a little callback hell):
var dns = require("dns");
function test(){
dns.resolve4("google.com", function(err, addresses) {
if (err) throw err;
for (var i = 0; i < addresses.length; i++) {
var a = addresses[i];
dns.reverse(a, function (err, data) {
if (err) throw err;
console.log("reverse for " + a + ": " + JSON.stringify(data));
});
};
});
}
test();
THE SAME CODE, using wait.for (sequential):
var dns = require("dns"), wait=require('wait.for');
function test(){
var addresses = wait.for(dns.resolve4,"google.com");
for (var i = 0; i < addresses.length; i++) {
var a = addresses[i];
console.log("reverse for " + a + ": " + JSON.stringify(wait.for(dns.reverse,a)));
}
}
wait.launchFiber(test);
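Because wait.for re-throws async errors inside the fiber, the same example can use an ordinary try/catch. A minimal sketch:

```javascript
var dns = require("dns"), wait = require('wait.for');

function testSafe() {
  try {
    var addresses = wait.for(dns.resolve4, "google.com");
    for (var i = 0; i < addresses.length; i++) {
      console.log("reverse for " + addresses[i] + ": " +
                  JSON.stringify(wait.for(dns.reverse, addresses[i])));
    }
  } catch (err) {
    // async errors are re-thrown inside the fiber, so a plain try/catch works
    console.log("DNS error:", err.message);
  }
}

wait.launchFiber(testSafe);
```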
using pure node.js (a callback hell):
var db = require("some-db-abstraction");
function handleWithdrawal(req,res){
try {
var amount=req.param("amount");
db.select("* from sessions where session_id=?",req.param("session_id"),function(err,sessiondata) {
if (err) throw err;
db.select("* from accounts where user_id=?",sessiondata.user_ID),function(err,accountdata) {
if (err) throw err;
if (accountdata.balance < amount) throw new Error('insufficient funds');
db.execute("withdrawal(?,?)",accountdata.ID,req.param("amount"), function(err,data) {
if (err) throw err;
res.write("withdrawal OK, amount: "+ req.param("amount"));
db.select("balance from accounts where account_id=?", accountdata.ID,function(err,balance) {
if (err) throw err;
res.end("your current balance is " + balance.amount);
});
});
});
});
}
catch(err) {
res.end("Withdrawal error: " + err.message);
}
}
Note: The above code, although it looks like it will catch the exceptions, will not. Catching exceptions with callback hell adds a lot of pain, and I'm not sure if you will still have the 'res' parameter available to respond to the user. If somebody would like to fix this example... be my guest.
THE SAME CODE, using wait.for (sequential logic - sequential programming):
var db = require("some-db-abstraction"), wait=require('wait.for');
function handleWithdrawal(req,res){
try {
var amount=req.param("amount");
var sessiondata = wait.forMethod(db,"select","* from sessions where session_id=?",req.param("session_id"));
var accountdata = wait.forMethod(db,"select","* from accounts where user_id=?",sessiondata.user_ID);
if (accountdata.balance < amount) throw new Error('insufficient funds');
wait.forMethod(db,"execute","withdrawal(?,?)",accountdata.ID,req.param("amount"));
res.write("withdrawal OK, amount: "+ req.param("amount"));
var balance = wait.forMethod(db,"select","balance from accounts where account_id=?", accountdata.ID);
res.end("your current balance is " + balance.amount);
}
catch(err) {
res.end("Withdrawal error: " + err.message);
}
}
Note: Exceptions will be caught as expected. The db methods (db.select, db.execute) will be called with this=db.
##How does wait.launchFiber work?
wait.launchFiber(genFn,param1,param2) starts executing the function genFn as a fiber/generator until a "yield" (wait.for) is found. Then wait.launchFiber executes the "yielded" value (a call to an async function) and links the generator's "next" with the async callback(err,data), so when the async call finishes and the callback is called, the fiber/generator "continues" right after the var x = wait.for(...).
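For intuition, here is a minimal sketch of the same idea written directly against node-fibers. This is not the library's actual source; names and error handling are simplified for illustration.

```javascript
// Minimal sketch of the launchFiber / wait.for mechanism using node-fibers.
var Fiber = require('fibers');

function launchFiber(fn) {
  var args = Array.prototype.slice.call(arguments, 1);
  Fiber(function () {
    fn.apply(null, args);     // the handler runs inside its own fiber
  }).run();                   // start it; node keeps spinning while it waits
}

function waitFor(asyncFn) {
  var fiber = Fiber.current;  // must be called from inside a fiber
  var args = Array.prototype.slice.call(arguments, 1);
  args.push(function (err, data) {   // append the standard (err,data) callback
    if (err) fiber.throwInto(err);   // -> throws at the waitFor call site
    else fiber.run(data);            // -> resumes the fiber, yield() returns data
  });
  asyncFn.apply(null, args);
  return Fiber.yield();       // suspend this fiber until the callback fires
}
```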
###wait.parallel.launch(functions:Array)
Note: must be in a Fiber
####input:
wait.parallel.launch expects an array of [[func,arg,arg..],[func,arg,arg..],...] and then launches a fiber for each function call, in parallel, and waits for all the fibers to complete.
The functions to be called should not be async functions.
Each called sync function will be executed in its own fiber, and this sync function should/can use data=wait.for(..) internally in order to call async functions.
####actions:
- launches a fiber for each func
- the fiber does resultArray[index] = func.apply(undefined,args)
####returns:
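A hedged usage sketch (it assumes wait.parallel.launch returns the resultArray described above; it must be run from inside a fiber):

```javascript
var wait = require('wait.for');
var dns = require('dns');

// a sync-style function: it may use wait.for internally, since it runs in its own fiber
function reverseFirst(name) {
  var addresses = wait.for(dns.resolve4, name);
  return wait.for(dns.reverse, addresses[0]);
}

function main() {
  var results = wait.parallel.launch([
    [reverseFirst, 'google.com'],
    [reverseFirst, 'bing.com']
  ]);
  console.log(results); // results[0], results[1], in input order
}

wait.launchFiber(main);
```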
###wait.parallel.map(arr:Array, mappedFn:function)
Note: must be in a Fiber
####input:
- mappedFn should return the converted item. Since we're in a fiber, mappedFn can use wait.for and also try/catch/throw.
####returns:
###wait.parallel.filter(arr:Array, itemTestFn:function)
Note: must be in a Fiber
####input:
- itemTestFn should return true|false. Since we're in a fiber, itemTestFn can use wait.for and also try/catch/throw.
####returns:
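A hedged sketch combining both helpers (only the item argument of mappedFn/itemTestFn is assumed; run from inside a fiber):

```javascript
var wait = require('wait.for');
var dns = require('dns');

function main() {
  var names = ['google.com', 'bing.com', 'no-such-host.invalid'];
  var resolved = wait.parallel.map(names, function (name) {
    try { return wait.for(dns.resolve4, name); }  // each mapped call runs in its own fiber
    catch (err) { return null; }                  // failed lookups become null
  });
  var ok = wait.parallel.filter(resolved, function (item) {
    return item !== null;                         // keep only successful lookups
  });
  console.log(ok);
}

wait.launchFiber(main);
```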
Parallel Usage Example: see:
Author: luciotato
Source Code: https://github.com/luciotato/waitfor
License: MIT license
This serverless plugin is a wrapper for amplify-appsync-simulator made for testing AppSync APIs built with serverless-appsync-plugin.
Install
npm install serverless-appsync-simulator
# or
yarn add serverless-appsync-simulator
Usage
This plugin relies on your serverless yml file and on the serverless-offline
plugin.
plugins:
- serverless-dynamodb-local # only if you need dynamodb resolvers and you don't have an external dynamodb
- serverless-appsync-simulator
- serverless-offline
Note: Order is important: serverless-appsync-simulator must go before serverless-offline.
To start the simulator, run the following command:
sls offline start
You should see in the logs something like:
...
Serverless: AppSync endpoint: http://localhost:20002/graphql
Serverless: GraphiQl: http://localhost:20002
...
Configuration
Put options under custom.appsync-simulator in your serverless.yml file.
| option | default | description |
| ------ | ------- | ----------- |
| apiKey | 0123456789 | When using API_KEY as authentication type, the key to authenticate to the endpoint. |
| port | 20002 | AppSync operations port; if using multiple APIs, the value of this option will be used as a starting point, and each other API will have a port of lastPort + 10 (e.g. 20002, 20012, 20022, etc.) |
| wsPort | 20003 | AppSync subscriptions port; if using multiple APIs, the value of this option will be used as a starting point, and each other API will have a port of lastPort + 10 (e.g. 20003, 20013, 20023, etc.) |
| location | . (base directory) | Location of the lambda functions handlers. |
| refMap | {} | A mapping of resource resolutions for the Ref function |
| getAttMap | {} | A mapping of resource resolutions for the GetAtt function |
| importValueMap | {} | A mapping of resource resolutions for the ImportValue function |
| functions | {} | A mapping of external functions for providing invoke url for external functions |
| dynamoDb.endpoint | http://localhost:8000 | Dynamodb endpoint. Specify it if you're not using serverless-dynamodb-local. Otherwise, port is taken from dynamodb-local conf |
| dynamoDb.region | localhost | Dynamodb region. Specify it if you're connecting to a remote Dynamodb instance. |
| dynamoDb.accessKeyId | DEFAULT_ACCESS_KEY | AWS Access Key ID to access DynamoDB |
| dynamoDb.secretAccessKey | DEFAULT_SECRET | AWS Secret Key to access DynamoDB |
| dynamoDb.sessionToken | DEFAULT_ACCESS_TOKEEN | AWS Session Token to access DynamoDB, only if you have temporary security credentials configured on AWS |
| dynamoDb.* | | You can add every configuration accepted by the DynamoDB SDK |
| rds.dbName | | Name of the database |
| rds.dbHost | | Database host |
| rds.dbDialect | | Database dialect. Possible values: mysql \| postgres |
| rds.dbUsername | | Database username |
| rds.dbPassword | | Database password |
| rds.dbPort | | Database port |
| watch | *.graphql, *.vtl | Array of glob patterns to watch for hot-reloading. |
Example:
custom:
appsync-simulator:
location: '.webpack/service' # use webpack build directory
dynamoDb:
endpoint: 'http://my-custom-dynamo:8000'
Hot-reloading
By default, the simulator will hot-reload when changes to *.graphql or *.vtl files are detected. Changes to *.yml files are not supported (yet? - this is a Serverless Framework limitation). You will need to restart the simulator each time you change yml files.
Hot-reloading relies on watchman. Make sure it is installed on your system.
You can change the files being watched with the watch option, which is then passed to watchman as the match expression.
e.g.
custom:
appsync-simulator:
watch:
- ["match", "handlers/**/*.vtl", "wholename"] # => array is interpreted as the literal match expression
- "*.graphql" # => string like this is equivalent to `["match", "*.graphql"]`
Or you can opt out by leaving an empty array or setting the option to false.
Note: Functions should not require hot-reloading, unless you are using a transpiler or a bundler (such as webpack, babel or typescript), in which case you should delegate hot-reloading to that instead.
Resource CloudFormation functions resolution
This plugin supports some resources resolution from the Ref, Fn::GetAtt and Fn::ImportValue functions in your yaml file. It also supports some other Cfn functions such as Fn::Join, Fn::Sub, etc.
Note: Under the hood, this feature relies on the cfn-resolver-lib package. For more info on supported cfn functions, refer to the documentation.
You can reference resources in your functions' environment variables (that will be accessible from your lambda functions) or datasource definitions. The plugin will automatically resolve them for you.
provider:
environment:
BUCKET_NAME:
Ref: MyBucket # resolves to `my-bucket-name`
resources:
Resources:
MyDbTable:
Type: AWS::DynamoDB::Table
Properties:
TableName: myTable
...
MyBucket:
Type: AWS::S3::Bucket
Properties:
BucketName: my-bucket-name
...
# in your appsync config
dataSources:
- type: AMAZON_DYNAMODB
name: dynamosource
config:
tableName:
Ref: MyDbTable # resolves to `myTable`
Sometimes, some references cannot be resolved, as they come from an Output from Cloudformation; or you might want to use mocked values in your local environment.
In those cases, you can define (or override) those values using the refMap, getAttMap and importValueMap options.
- refMap takes a mapping of resource name to value pairs
- getAttMap takes a mapping of resource name to attribute/value pairs
- importValueMap takes a mapping of import name to value pairs
Example:
custom:
appsync-simulator:
refMap:
# Override `MyDbTable` resolution from the previous example.
MyDbTable: 'mock-myTable'
getAttMap:
# define ElasticSearchInstance DomainName
ElasticSearchInstance:
DomainEndpoint: 'localhost:9200'
importValueMap:
other-service-api-url: 'https://other.api.url.com/graphql'
# in your appsync config
dataSources:
- type: AMAZON_ELASTICSEARCH
name: elasticsource
config:
# endpoint resolves as 'http://localhost:9200'
endpoint:
Fn::Join:
- ''
- - https://
- Fn::GetAtt:
- ElasticSearchInstance
- DomainEndpoint
In some special cases you will need to use the key-value mock notation. A good example is when you need to include the serverless stage value (${self:provider.stage}) in the import name.
This notation can be used with all mocks - refMap, getAttMap and importValueMap.
provider:
environment:
FINISH_ACTIVITY_FUNCTION_ARN:
Fn::ImportValue: other-service-api-${self:provider.stage}-url
custom:
appsync-simulator:
importValueMap:
- key: other-service-api-${self:provider.stage}-url
value: 'https://other.api.url.com/graphql'
This plugin only tries to resolve the following parts of the yml tree:
provider.environment
functions[*].environment
custom.appSync
If you have the need of resolving others, feel free to open an issue and explain your use case.
For now, the supported resources to be automatically resolved by Ref: are:
Feel free to open a PR or an issue to extend them as well.
External functions
When a function is not defined within the current serverless file, you can still call it by providing an invoke url which should point to a REST method. Make sure you specify "get" or "post" for the method. Default is "get", but you probably want "post".
custom:
appsync-simulator:
functions:
addUser:
url: http://localhost:3016/2015-03-31/functions/addUser/invocations
method: post
addPost:
url: https://jsonplaceholder.typicode.com/posts
method: post
Supported Resolver types
This plugin supports resolvers implemented by amplify-appsync-simulator, as well as custom resolvers.
From Aws Amplify:
Implemented by this plugin
#set( $cols = [] )
#set( $vals = [] )
#foreach( $entry in $ctx.args.input.keySet() )
#set( $regex = "([a-z])([A-Z]+)")
#set( $replacement = "$1_$2")
#set( $toSnake = $entry.replaceAll($regex, $replacement).toLowerCase() )
#set( $discard = $cols.add("$toSnake") )
#if( $util.isBoolean($ctx.args.input[$entry]) )
#if( $ctx.args.input[$entry] )
#set( $discard = $vals.add("1") )
#else
#set( $discard = $vals.add("0") )
#end
#else
#set( $discard = $vals.add("'$ctx.args.input[$entry]'") )
#end
#end
#set( $valStr = $vals.toString().replace("[","(").replace("]",")") )
#set( $colStr = $cols.toString().replace("[","(").replace("]",")") )
#if ( $valStr.substring(0, 1) != '(' )
#set( $valStr = "($valStr)" )
#end
#if ( $colStr.substring(0, 1) != '(' )
#set( $colStr = "($colStr)" )
#end
{
"version": "2018-05-29",
"statements": ["INSERT INTO <name-of-table> $colStr VALUES $valStr", "SELECT * FROM <name-of-table> ORDER BY id DESC LIMIT 1"]
}
#set( $update = "" )
#set( $equals = "=" )
#foreach( $entry in $ctx.args.input.keySet() )
#set( $cur = $ctx.args.input[$entry] )
#set( $regex = "([a-z])([A-Z]+)")
#set( $replacement = "$1_$2")
#set( $toSnake = $entry.replaceAll($regex, $replacement).toLowerCase() )
#if( $util.isBoolean($cur) )
#if( $cur )
#set ( $cur = "1" )
#else
#set ( $cur = "0" )
#end
#end
#if ( $util.isNullOrEmpty($update) )
#set($update = "$toSnake$equals'$cur'" )
#else
#set($update = "$update,$toSnake$equals'$cur'" )
#end
#end
{
"version": "2018-05-29",
"statements": ["UPDATE <name-of-table> SET $update WHERE id=$ctx.args.input.id", "SELECT * FROM <name-of-table> WHERE id=$ctx.args.input.id"]
}
{
"version": "2018-05-29",
"statements": ["UPDATE <name-of-table> set deleted_at=NOW() WHERE id=$ctx.args.id", "SELECT * FROM <name-of-table> WHERE id=$ctx.args.id"]
}
#set ( $index = -1)
#set ( $result = $util.parseJson($ctx.result) )
#set ( $meta = $result.sqlStatementResults[1].columnMetadata)
#foreach ($column in $meta)
#set ($index = $index + 1)
#if ( $column["typeName"] == "timestamptz" )
#set ($time = $result["sqlStatementResults"][1]["records"][0][$index]["stringValue"] )
#set ( $nowEpochMillis = $util.time.parseFormattedToEpochMilliSeconds("$time.substring(0,19)+0000", "yyyy-MM-dd HH:mm:ssZ") )
#set ( $isoDateTime = $util.time.epochMilliSecondsToISO8601($nowEpochMillis) )
$util.qr( $result["sqlStatementResults"][1]["records"][0][$index].put("stringValue", "$isoDateTime") )
#end
#end
#set ( $res = $util.parseJson($util.rds.toJsonString($util.toJson($result)))[1][0] )
#set ( $response = {} )
#foreach($mapKey in $res.keySet())
#set ( $s = $mapKey.split("_") )
#set ( $camelCase="" )
#set ( $isFirst=true )
#foreach($entry in $s)
#if ( $isFirst )
#set ( $first = $entry.substring(0,1) )
#else
#set ( $first = $entry.substring(0,1).toUpperCase() )
#end
#set ( $isFirst=false )
#set ( $stringLength = $entry.length() )
#set ( $remaining = $entry.substring(1, $stringLength) )
#set ( $camelCase = "$camelCase$first$remaining" )
#end
$util.qr( $response.put("$camelCase", $res[$mapKey]) )
#end
$utils.toJson($response)
Variable map support is limited and does not differentiate between number and string data types; please inject them directly if needed.
null, true, and false values will be escaped properly.
{
"version": "2018-05-29",
"statements": [
"UPDATE <name-of-table> set deleted_at=NOW() WHERE id=:ID",
"SELECT * FROM <name-of-table> WHERE id=:ID and unix_timestamp > $ctx.args.newerThan"
],
variableMap: {
":ID": $ctx.args.id,
## ":TIMESTAMP": $ctx.args.newerThan -- This will be handled as a string!!!
}
}
Requires
Author: Serverless-appsync
Source Code: https://github.com/serverless-appsync/serverless-appsync-simulator
License: MIT License
Not babashka. Node.js babashka!?
Ad-hoc CLJS scripting on Node.js.
Experimental. Please report issues here.
Nbb's main goal is to make it easy to get started with ad hoc CLJS scripting on Node.js.
Additional goals and features are:
Nbb requires Node.js v12 or newer.
CLJS code is evaluated through SCI, the same interpreter that powers babashka. Because SCI works with advanced compilation, the bundle size, especially when combined with other dependencies, is smaller than what you get with self-hosted CLJS. That makes startup faster. The trade-off is that execution is less performant and that only a subset of CLJS is available (e.g. no deftype, yet).
Install nbb from NPM:
$ npm install nbb -g
Omit -g for a local install.
Try out an expression:
$ nbb -e '(+ 1 2 3)'
6
And then install some other NPM libraries to use in the script. E.g.:
$ npm install csv-parse shelljs zx
Create a script which uses the NPM libraries:
(ns script
(:require ["csv-parse/lib/sync$default" :as csv-parse]
["fs" :as fs]
["path" :as path]
["shelljs$default" :as sh]
["term-size$default" :as term-size]
["zx$default" :as zx]
["zx$fs" :as zxfs]
[nbb.core :refer [*file*]]))
(prn (path/resolve "."))
(prn (term-size))
(println (count (str (fs/readFileSync *file*))))
(prn (sh/ls "."))
(prn (csv-parse "foo,bar"))
(prn (zxfs/existsSync *file*))
(zx/$ #js ["ls"])
Call the script:
$ nbb script.cljs
"/private/tmp/test-script"
#js {:columns 216, :rows 47}
510
#js ["node_modules" "package-lock.json" "package.json" "script.cljs"]
#js [#js ["foo" "bar"]]
true
$ ls
node_modules
package-lock.json
package.json
script.cljs
Nbb has first class support for macros: you can define them right inside your .cljs file, like you are used to from JVM Clojure. Consider the plet macro to make working with promises more palatable:
(defmacro plet
[bindings & body]
(let [binding-pairs (reverse (partition 2 bindings))
body (cons 'do body)]
(reduce (fn [body [sym expr]]
(let [expr (list '.resolve 'js/Promise expr)]
(list '.then expr (list 'clojure.core/fn (vector sym)
body))))
body
binding-pairs)))
Using this macro we can make async code look more like sync code. Consider this puppeteer example:
(-> (.launch puppeteer)
(.then (fn [browser]
(-> (.newPage browser)
(.then (fn [page]
(-> (.goto page "https://clojure.org")
(.then #(.screenshot page #js{:path "screenshot.png"}))
(.catch #(js/console.log %))
(.then #(.close browser)))))))))
Using plet this becomes:
(plet [browser (.launch puppeteer)
page (.newPage browser)
_ (.goto page "https://clojure.org")
_ (-> (.screenshot page #js{:path "screenshot.png"})
(.catch #(js/console.log %)))]
(.close browser))
See the puppeteer example for the full code.
Since v0.0.36, nbb includes promesa, which is a library to deal with promises. The above plet macro is similar to promesa.core/let.
$ time nbb -e '(+ 1 2 3)'
6
nbb -e '(+ 1 2 3)' 0.17s user 0.02s system 109% cpu 0.168 total
The baseline startup time for a script is about 170ms on my laptop. When invoked via npx this adds another 300ms or so, so for faster startup, either use a globally installed nbb or use $(npm bin)/nbb script.cljs to bypass npx.
Nbb does not depend on any NPM dependencies. All NPM libraries loaded by a script are resolved relative to that script. When using the Reagent module, React is resolved in the same way as any other NPM library.
To load .cljs files from local paths or dependencies, you can use the --classpath argument. The current dir is added to the classpath automatically. So if there is a file foo/bar.cljs relative to your current dir, then you can load it via (:require [foo.bar :as fb]). Note that nbb uses the same naming conventions for namespaces and directories as other Clojure tools: foo-bar in the namespace name becomes foo_bar in the directory name.
To load dependencies from the Clojure ecosystem, you can use the Clojure CLI or babashka to download them and produce a classpath:
$ classpath="$(clojure -A:nbb -Spath -Sdeps '{:aliases {:nbb {:replace-deps {com.github.seancorfield/honeysql {:git/tag "v2.0.0-rc5" :git/sha "01c3a55"}}}}}')"
and then feed it to the --classpath argument:
$ nbb --classpath "$classpath" -e "(require '[honey.sql :as sql]) (sql/format {:select :foo :from :bar :where [:= :baz 2]})"
["SELECT foo FROM bar WHERE baz = ?" 2]
Currently nbb only reads from directories, not jar files, so you are encouraged to use git libs. Support for .jar files will be added later.
The name of the file that is currently being executed is available via nbb.core/*file* or on the metadata of vars:
(ns foo
(:require [nbb.core :refer [*file*]]))
(prn *file*) ;; "/private/tmp/foo.cljs"
(defn f [])
(prn (:file (meta #'f))) ;; "/private/tmp/foo.cljs"
Nbb includes reagent.core, which will be lazily loaded when required. You can use this together with ink to create a TUI application:
$ npm install ink
ink-demo.cljs:
(ns ink-demo
(:require ["ink" :refer [render Text]]
[reagent.core :as r]))
(defonce state (r/atom 0))
(doseq [n (range 1 11)]
(js/setTimeout #(swap! state inc) (* n 500)))
(defn hello []
[:> Text {:color "green"} "Hello, world! " @state])
(render (r/as-element [hello]))
Working with callbacks and promises can become tedious. Since nbb v0.0.36 the promesa.core namespace is included with the let and do! macros. An example:
(ns prom
(:require [promesa.core :as p]))
(defn sleep [ms]
(js/Promise.
(fn [resolve _]
(js/setTimeout resolve ms))))
(defn do-stuff
[]
(p/do!
(println "Doing stuff which takes a while")
(sleep 1000)
1))
(p/let [a (do-stuff)
b (inc a)
c (do-stuff)
d (+ b c)]
(prn d))
$ nbb prom.cljs
Doing stuff which takes a while
Doing stuff which takes a while
3
Also see API docs.
Since nbb v0.0.75 applied-science/js-interop is available:
(ns example
(:require [applied-science.js-interop :as j]))
(def o (j/lit {:a 1 :b 2 :c {:d 1}}))
(prn (j/select-keys o [:a :b])) ;; #js {:a 1, :b 2}
(prn (j/get-in o [:c :d])) ;; 1
Most of this library is supported in nbb, except the following:
- :syms
- .-x notation. In nbb, you must use keywords.
See the example of what is currently supported.
See the examples directory for small examples.
Also check out these projects built with nbb:
See API documentation.
See this gist on how to convert an nbb script or project to shadow-cljs.
Prerequisites:
To build:
bb release
Run bb tasks for more project-related tasks.
Download Details:
Author: borkdude
Download Link: Download The Source Code
Official Website: https://github.com/borkdude/nbb
License: EPL-1.0
#node #javascript
Generis
Versatile Go code generator.
Generis is a lightweight code preprocessor adding the following features to the Go language :
package main;
// -- IMPORTS
import (
"html"
"io"
"log"
"net/http"
"net/url"
"strconv"
);
// -- DEFINITIONS
#define DebugMode
#as true
// ~~
#define HttpPort
#as 8080
// ~~
#define WriteLine( {{text}} )
#as log.Println( {{text}} )
// ~~
#define local {{variable}} : {{type}};
#as var {{variable}} {{type}};
// ~~
#define DeclareStack( {{type}}, {{name}} )
#as
// -- TYPES
type {{name}}Stack struct
{
ElementArray []{{type}};
}
// -- INQUIRIES
func ( stack * {{name}}Stack ) IsEmpty(
) bool
{
return len( stack.ElementArray ) == 0;
}
// -- OPERATIONS
func ( stack * {{name}}Stack ) Push(
element {{type}}
)
{
stack.ElementArray = append( stack.ElementArray, element );
}
// ~~
func ( stack * {{name}}Stack ) Pop(
) {{type}}
{
local
element : {{type}};
element = stack.ElementArray[ len( stack.ElementArray ) - 1 ];
stack.ElementArray = stack.ElementArray[ : len( stack.ElementArray ) - 1 ];
return element;
}
#end
// ~~
#define DeclareStack( {{type}} )
#as DeclareStack( {{type}}, {{type:PascalCase}} )
// -- TYPES
DeclareStack( string )
DeclareStack( int32 )
// -- FUNCTIONS
func HandleRootPage(
response_writer http.ResponseWriter,
request * http.Request
)
{
local
boolean : bool;
local
natural : uint;
local
integer : int;
local
real : float64;
local
escaped_html_text,
escaped_url_text,
text : string;
local
integer_stack : Int32Stack;
boolean = true;
natural = 10;
integer = 20;
real = 30.0;
text = "text";
escaped_url_text = "&escaped text?";
escaped_html_text = "<escaped text/>";
integer_stack.Push( 10 );
integer_stack.Push( 20 );
integer_stack.Push( 30 );
#write response_writer
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title><%= request.URL.Path %></title>
</head>
<body>
<% if ( boolean ) { %>
<%= "URL : " + request.URL.Path %>
<br/>
<%@ natural %>
<%# integer %>
<%& real %>
<br/>
<%~ text %>
<%^ escaped_url_text %>
<%= escaped_html_text %>
<%= "<%% ignored %%>" %>
<%% ignored %%>
<% } %>
<br/>
Stack :
<br/>
<% for !integer_stack.IsEmpty() { %>
<%# integer_stack.Pop() %>
<% } %>
</body>
</html>
#end
}
// ~~
func main()
{
http.HandleFunc( "/", HandleRootPage );
#if DebugMode
WriteLine( "Listening on http://localhost:HttpPort" );
#end
log.Fatal(
http.ListenAndServe( ":HttpPort", nil )
);
}
Constants and generic code can be defined with the following syntax :
#define old code
#as new code
#define old code
#as
new
code
#end
#define
old
code
#as new code
#define
old
code
#as
new
code
#end
The #define directive can contain one or several parameters :
{{variable name}} : hierarchical code (with properly matching brackets and parentheses)
{{variable name#}} : statement code (hierarchical code without semicolon)
{{variable name$}} : plain code
{{variable name:boolean expression}} : conditional hierarchical code
{{variable name#:boolean expression}} : conditional statement code
{{variable name$:boolean expression}} : conditional plain code
They can have a boolean expression to require they match specific conditions :
HasText text
HasPrefix prefix
HasSuffix suffix
HasIdentifier text
false
true
!expression
expression && expression
expression || expression
( expression )
The #define directive must not start or end with a parameter.
The #as directive can use the value of the #define parameters :
{{variable name}}
{{variable name:filter function}}
{{variable name:filter function:filter function:...}}
Their value can be changed through one or several filter functions :
LowerCase
UpperCase
MinorCase
MajorCase
SnakeCase
PascalCase
CamelCase
RemoveComments
RemoveBlanks
PackStrings
PackIdentifiers
ReplacePrefix old_prefix new_prefix
ReplaceSuffix old_suffix new_suffix
ReplaceText old_text new_text
ReplaceIdentifier old_identifier new_identifier
AddPrefix prefix
AddSuffix suffix
RemovePrefix prefix
RemoveSuffix suffix
RemoveText text
RemoveIdentifier identifier
Conditional code can be defined with the following syntax :
#if boolean expression
#if boolean expression
...
#else
...
#end
#else
#if boolean expression
...
#else
...
#end
#end
The boolean expression can use the following operators :
false
true
!expression
expression && expression
expression || expression
( expression )
Templated HTML code can be sent to a stream writer using the following syntax :
#write writer expression
<% code %>
<%@ natural expression %>
<%# integer expression %>
<%& real expression %>
<%~ text expression %>
<%= escaped text expression %>
<%! removed content %>
<%% ignored tags %%>
#end
The --join option requires the statements to end with a semicolon.
The #write directive is only available for the Go language.
Install the DMD 2 compiler (using the MinGW setup option on Windows).
Build the executable with the following command line :
dmd -m64 generis.d
generis [options]
--prefix # : set the command prefix
--parse INPUT_FOLDER/ : parse the definitions of the Generis files in the input folder
--process INPUT_FOLDER/ OUTPUT_FOLDER/ : reads the Generis files in the input folder and writes the processed files in the output folder
--trim : trim the HTML templates
--join : join the split statements
--create : create the output folders if needed
--watch : watch the Generis files for modifications
--pause 500 : time to wait before checking the Generis files again
--tabulation 4 : set the tabulation space count
--extension .go : generate files with this extension
generis --process GS/ GO/
Reads the Generis files in the GS/
folder and writes Go files in the GO/
folder.
generis --process GS/ GO/ --create
Reads the Generis files in the GS/
folder and writes Go files in the GO/
folder, creating the output folders if needed.
generis --process GS/ GO/ --create --watch
Reads the Generis files in the GS/
folder and writes Go files in the GO/
folder, creating the output folders if needed and watching the Generis files for modifications.
generis --process GS/ GO/ --trim --join --create --watch
Reads the Generis files in the GS/
folder and writes Go files in the GO/
folder, trimming the HTML templates, joining the split statements, creating the output folders if needed and watching the Generis files for modifications.
Version: 2.0
Author: Senselogic
Source Code: https://github.com/senselogic/GENERIS
License: View license
If you look at the backend technology used by today's most popular apps, there is one thing you will find common among them, and that is the use of the NodeJS framework. Yes, the NodeJS framework is that effective and successful.
If you wish to have a strong backend for efficient app performance then have NodeJS at the backend.
WebClues Infotech offers professionals at different levels of experience and expertise for your app development needs. So hire a dedicated NodeJS developer from WebClues Infotech that matches your experience and expertise requirements.
So what are you waiting for? Get your app developed with strong performance parameters from WebClues Infotech
For inquiry click here: https://www.webcluesinfotech.com/hire-nodejs-developer/
Book Free Interview: https://bit.ly/3dDShFg
#hire dedicated node.js developers #hire node.js developers #hire top dedicated node.js developers #hire node.js developers in usa & india #hire node js development company #hire the best node.js developers & programmers
Front-end web development has been dominated by JavaScript for a long time. Google, Facebook, Wikipedia, and most other websites use JS for client-side functionality. More recently, it has also made the shift to cross-platform mobile development as a core technology in React Native, NativeScript, Apache Cordova, and other hybrid tools.
Over the last few years, Node.js has moved into backend development as well. Developers want to use the same tech stack for the whole web project without learning another language for server-side development. Node.js is a tool that adapts JavaScript's functionality and syntax to the backend.
Node.js isn't a language, a library, or a framework. It's a runtime environment: normally JavaScript needs a browser to run, but Node.js provides the environment for JS to run outside of the browser. It's built on the V8 JavaScript engine, which can run in Chrome, in other browsers, or standalone.
The job of V8 is to compile browser-oriented JS code into machine code, so JS becomes a general-purpose language that servers can understand. This is one of the advantages of using Node.js in web application development: it extends the usefulness of JavaScript, allowing developers to integrate the language with APIs, other languages, and external libraries.
Lately, organizations have been actively switching their backend tech stacks to Node.js. LinkedIn picked Node.js over Ruby on Rails because it handled growing load better and reduced the number of servers several times over. PayPal and Netflix did something similar, only their goal was to move their architecture to microservices. Let's look at the reasons to pick Node.js for web application development and what to keep in mind when planning to hire Node.js developers.
The first thing that makes Node.js a go-to environment for web development is its JavaScript legacy. JavaScript is the most popular language right now, with a huge number of free tools and an active community. Node.js, thanks to its connection with JS, quickly rose in popularity; it now has more than 368 million downloads and a huge number of free tools in the package registry.
Alongside its popularity, Node.js also inherited the fundamental JS benefits:
In addition, it's part of the well-known MEAN tech stack (the combination of MongoDB, Express.js, Angular, and Node.js: four tools that handle all the essential parts of web application development).
This is perhaps the clearest advantage of Node.js web application development. JavaScript is a must for web development. Whether you build a multi-page or single-page application, you need to know JS well. If you are already comfortable with JavaScript, learning Node.js won't be a problem: the syntax, basic functionality, and core principles are all similar.
If you have JS developers on your team, it will be easier for them to learn JS-based Node than a completely new language. What's more, the front-end and back-end codebases will be very similar, easy to read, and easy to maintain, because they are both JS-based.
There's another reason why Node.js became popular so quickly. The environment suits the idea of microservice development well (splitting monolithic functionality into dozens or hundreds of smaller services).
Microservices need to communicate with each other quickly, and Node.js is one of the fastest tools for data processing. Among the fundamental Node.js benefits for software development are its non-blocking algorithms.
Node.js processes several requests at once without waiting for the first one to be completed. Many microservices can send messages to each other, and they will be received and answered simultaneously.
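As a minimal illustration of that non-blocking model (the timeout below is just a stand-in for a slow I/O call such as a database query or a call to another microservice):

```javascript
// While one request waits on its "slow I/O", the event loop keeps serving the others.
const http = require('http');

http.createServer((req, res) => {
  setTimeout(() => {                 // pretend this is a 2-second I/O call
    res.end('done at ' + new Date().toISOString() + '\n');
  }, 2000);
}).listen(3000);
// Ten concurrent requests all finish about 2 seconds after they were sent,
// not 20 seconds after the first one.
```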
Node.js was built with scalability in mind; its name really says it. The environment allows multiple nodes to run at once and communicate with each other. Here's why Node.js scalability is better than other web backend development solutions.
Node.js has a module that is responsible for load balancing across each running CPU core. This is one of many Node.js module benefits: you can run multiple nodes at once, and the environment will automatically balance the workload.
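That module is Node's built-in cluster module; a minimal sketch of forking one worker per CPU core (the port and the response text are placeholders):

```javascript
// Fork one worker per CPU core; the primary process shares the listening
// socket and distributes incoming connections among the workers.
const cluster = require('cluster');
const http = require('http');
const os = require('os');

if (cluster.isMaster) {
  os.cpus().forEach(() => cluster.fork());    // one worker per core
  cluster.on('exit', () => cluster.fork());   // replace crashed workers
} else {
  http.createServer((req, res) => {
    res.end('handled by worker ' + process.pid + '\n');
  }).listen(3000);                            // placeholder port
}
```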
Node.js allows horizontal partitioning: you can split your application into multiple instances. You show different versions of the application to different users, based on their age, interests, location, language, and so on. This increases personalization and reduces workload. Node achieves this with child processes: operations that quickly communicate with each other and share the same origin.
What's more, Node's non-blocking request-processing system contributes to this speed, letting applications process thousands of requests.
Many developers consider asynchronicity to be both one of the drawbacks and one of the benefits of Node.js web application development. In Node, whenever an async function is executed, the code passes a callback. As the number of functions grows, so does the number of callbacks, and you end up in a situation known as callback hell.
However, Node.js offers a way out. You can use frameworks that organize functions and sort through callbacks. Frameworks will connect similar functions automatically, so you can find a needed element via search or in a folder. Then there's no need to dig through callbacks.
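The article doesn't name a specific tool, but in modern Node.js the most common way out is promises with async/await; a small before/after sketch using the fs module:

```javascript
// Nested callbacks...
const fs = require('fs');
fs.readFile('a.txt', 'utf8', (err, a) => {
  if (err) return console.error(err);
  fs.readFile('b.txt', 'utf8', (err, b) => {
    if (err) return console.error(err);
    console.log(a + b);
  });
});

// ...versus the same logic with promises and async/await
const fsp = require('fs').promises;
async function concat() {
  const a = await fsp.readFile('a.txt', 'utf8');
  const b = await fsp.readFile('b.txt', 'utf8');
  console.log(a + b);
}
concat().catch(console.error);
```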
So, these are some of the top benefits of Node.js in web application development, and this is how Node.js contributes so much to the field.
I hope you are now fully aware of why Node.js is really important for your web project. If you are looking to hire a Node.js development company in India, I would suggest that you also ask for a short consultation whenever you call.
Good Luck!
#node.js development company in india #node js development company #hire node js developers #hire node.js developers in india #node.js development services #node.js development