Source code: https://github.com/didinj/node-express-postgresql-sequelize.git
Subscribe: https://www.youtube.com/channel/UCtI81hYLh2Ae_45KHkyy0vw/featured
This is a comprehensive, step-by-step tutorial on building a secure Node.js, Express.js, Passport.js, and PostgreSQL RESTful web service. Previously, we showed you a combination of Node.js, Express.js, and PostgreSQL in a tutorial. Now we just add security for those REST API endpoints. Of course, we will start this tutorial from scratch, with a zero application. We will use JWT for this Node.js, Express.js, Passport.js, and PostgreSQL tutorial.
The following tools, frameworks, and modules are required for this tutorial:
- Node.js (with npm or yarn)
- Express.js and the Express generator
- Sequelize and the Sequelize CLI
- PostgreSQL (we are using PostgreSQL 9.5.13)
We assume that you have installed the PostgreSQL server on your machine, or that you can use your own remote server. We also assume that Node.js is installed and that you can run the `node`, `npm`, or `yarn` commands in your terminal or command line. Next, check their versions by typing these commands in your terminal or command line.
node -v
v8.11.1
npm -v
6.1.0
yarn -v
1.7.0
Those are the versions we are using. You can watch the video tutorial on our YouTube channel. Let's continue with the main steps.
sudo npm install express-generator -g
express node-sequelize --view=ejs
cd node-sequelize && npm install
sudo npm install -g sequelize-cli
npm install --save sequelize
npm install --save pg pg-hstore
touch .sequelizerc
Open and edit that file then add these lines of code.
const path = require('path');
module.exports = {
  "config": path.resolve('./config', 'config.json'),
  "models-path": path.resolve('./models'),
  "seeders-path": path.resolve('./seeders'),
  "migrations-path": path.resolve('./migrations')
};
Next, type this command to initialize Sequelize.
sequelize init
That command will create `config/config.json`, `models/index.js`, `migrations`, and `seeders` directories and files. Next, open and edit `config/config.json` then make it like this.
{
  "development": {
    "username": "djamware",
    "password": "dj@mw@r3",
    "database": "node_sequelize",
    "host": "127.0.0.1",
    "dialect": "postgres"
  },
  "test": {
    "username": "root",
    "password": "dj@mw@r3",
    "database": "node_sequelize",
    "host": "127.0.0.1",
    "dialect": "postgres"
  },
  "production": {
    "username": "root",
    "password": "dj@mw@r3",
    "database": "node_sequelize",
    "host": "127.0.0.1",
    "dialect": "postgres"
  }
}
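Note that committing real passwords in `config/config.json` is only acceptable for local development. Sequelize CLI also accepts a JavaScript config file that can read credentials from environment variables; a minimal sketch (you would point the `config` entry in `.sequelizerc` at this file instead) could look like this.
// config/config.js - a sketch that reads credentials from environment variables
module.exports = {
  development: {
    username: process.env.DB_USER || 'djamware',
    password: process.env.DB_PASS,
    database: process.env.DB_NAME || 'node_sequelize',
    host: process.env.DB_HOST || '127.0.0.1',
    dialect: 'postgres'
  }
};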
Next, connect to the PostgreSQL console as the default postgres user.
psql postgres -U postgres
Next, type these commands to create a new user with a password, then give it permission to create databases.
postgres=# CREATE ROLE djamware WITH LOGIN PASSWORD 'dj@mw@r3';
postgres=# ALTER ROLE djamware CREATEDB;
Quit `psql`, then log in again using the new user that was just created.
postgres=# \q
psql postgres -U djamware
Enter the password, and you will land in the `psql` console.
psql (9.5.13)
Type "help" for help.
postgres=>
Type this command to create a new database.
postgres=> CREATE DATABASE node_sequelize;
Then grant the new user privileges on the new database and quit `psql`.
postgres=> GRANT ALL PRIVILEGES ON DATABASE node_sequelize TO djamware;
postgres=> \q
Next, create the models and migrations for Classroom, Student, Lecturer, Course, and StudentCourse by typing these commands.
sequelize model:create --name Classroom --attributes class_name:string
sequelize model:create --name Student --attributes classroom_id:integer,student_name:string
sequelize model:create --name Lecturer --attributes lecturer_name:string
sequelize model:create --name Course --attributes lecturer_id:integer,course_name:string
sequelize model:create --name StudentCourse --attributes student_id:integer,course_id:integer
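Each command above generates a model file in `models` and a matching timestamped migration in `migrations`. For orientation, the generated Classroom migration looks roughly like this sketch (your file will differ in details such as the timestamp in its name).
'use strict';
module.exports = {
  // Creates the Classrooms table with the attribute from the CLI command
  // plus the id/createdAt/updatedAt columns sequelize-cli always adds.
  up: (queryInterface, Sequelize) => queryInterface.createTable('Classrooms', {
    id: { allowNull: false, autoIncrement: true, primaryKey: true, type: Sequelize.INTEGER },
    class_name: { type: Sequelize.STRING },
    createdAt: { allowNull: false, type: Sequelize.DATE },
    updatedAt: { allowNull: false, type: Sequelize.DATE }
  }),
  down: (queryInterface, Sequelize) => queryInterface.dropTable('Classrooms')
};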
Next, open and edit each generated model file to add its associations. In `models/classroom.js`:
class Classroom extends Model {
static associate(models) {
Classroom.hasMany(models.Student, {
foreignKey: 'classroom_id',
as: 'students',
});
}
};
In `models/student.js`:
class Student extends Model {
static associate(models) {
Student.belongsTo(models.Classroom, {
foreignKey: 'classroom_id',
as: 'classroom'
});
Student.belongsToMany(models.Course, {
through: 'StudentCourse',
as: 'courses',
foreignKey: 'student_id'
});
}
};
In `models/lecturer.js`:
class Lecturer extends Model {
static associate(models) {
Lecturer.hasOne(models.Course, {
foreignKey: 'lecturer_id',
as: 'course',
});
}
};
In `models/course.js`:
class Course extends Model {
static associate(models) {
Course.belongsToMany(models.Student, {
through: 'StudentCourse',
as: 'students',
foreignKey: 'course_id'
});
Course.belongsTo(models.Lecturer, {
foreignKey: 'lecturer_id',
as: 'lecturer'
});
}
};
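These `associate` methods are called for you by the `models/index.js` file that `sequelize init` generated, so there is nothing else to wire up. A simplified sketch of what that generated loader does:
// models/index.js - simplified sketch of the loader generated by sequelize-cli
const fs = require('fs');
const path = require('path');
const Sequelize = require('sequelize');
const config = require(path.join(__dirname, '..', 'config', 'config.json')).development;
const sequelize = new Sequelize(config.database, config.username, config.password, config);
const db = {};
// Load every model file in this directory and register it by model name.
fs.readdirSync(__dirname)
  .filter((file) => file !== 'index.js' && file.endsWith('.js'))
  .forEach((file) => {
    const model = require(path.join(__dirname, file))(sequelize, Sequelize.DataTypes);
    db[model.name] = model;
  });
// Call each model's associate() so hasMany/belongsTo/belongsToMany get set up.
Object.keys(db).forEach((name) => {
  if (db[name].associate) db[name].associate(db);
});
db.sequelize = sequelize;
db.Sequelize = Sequelize;
module.exports = db;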
Next, run the migrations to create all of the tables in the PostgreSQL database.
sequelize db:migrate
Create a folder for the controllers and a new JavaScript file by typing these commands.
mkdir controllers
touch controllers/classroom.js
Open and edit `controllers/classroom.js` then add these lines of code.
const Classroom = require('../models').Classroom;
const Student = require('../models').Student;
module.exports = {
list(req, res) {
return Classroom
.findAll({
include: [{
model: Student,
as: 'students'
}],
order: [
['createdAt', 'DESC'],
[{ model: Student, as: 'students' }, 'createdAt', 'DESC'],
],
})
.then((classrooms) => res.status(200).send(classrooms))
.catch((error) => { res.status(400).send(error); });
},
getById(req, res) {
return Classroom
.findByPk(req.params.id, {
include: [{
model: Student,
as: 'students'
}],
})
.then((classroom) => {
if (!classroom) {
return res.status(404).send({
message: 'Classroom Not Found',
});
}
return res.status(200).send(classroom);
})
.catch((error) => {
console.log(error);
res.status(400).send(error);
});
},
add(req, res) {
return Classroom
.create({
class_name: req.body.class_name,
})
.then((classroom) => res.status(201).send(classroom))
.catch((error) => res.status(400).send(error));
},
update(req, res) {
return Classroom
.findByPk(req.params.id, {
include: [{
model: Student,
as: 'students'
}],
})
.then(classroom => {
if (!classroom) {
return res.status(404).send({
message: 'Classroom Not Found',
});
}
return classroom
.update({
class_name: req.body.class_name || classroom.class_name,
})
.then(() => res.status(200).send(classroom))
.catch((error) => res.status(400).send(error));
})
.catch((error) => res.status(400).send(error));
},
delete(req, res) {
return Classroom
.findByPk(req.params.id)
.then(classroom => {
if (!classroom) {
return res.status(404).send({
message: 'Classroom Not Found',
});
}
return classroom
.destroy()
.then(() => res.status(204).send())
.catch((error) => res.status(400).send(error));
})
.catch((error) => res.status(400).send(error));
},
};
In that controller, we have all of the CRUD (Create, Read, Update, and Delete) functions. To make this controller available via the controllers folder, add an index file that declares this controller and the other controller files.
touch controllers/index.js
Open and edit that file then add these lines of JavaScript code.
const classroom = require('./classroom');
module.exports = {
classroom,
};
Open and edit `routes/index.js` then declare the Classroom controller after the other variables.
const classroomController = require('../controllers').classroom;
Add these routes for the Classroom controller after the existing route.
router.get('/api/classroom', classroomController.list);
router.get('/api/classroom/:id', classroomController.getById);
router.post('/api/classroom', classroomController.add);
router.put('/api/classroom/:id', classroomController.update);
router.delete('/api/classroom/:id', classroomController.delete);
Next, create the Student controller file.
touch controllers/student.js
Open and edit `controllers/student.js` then add these lines of code that contain the full CRUD functions for the Student model.
const Student = require('../models').Student;
const Classroom = require('../models').Classroom;
const Course = require('../models').Course;
module.exports = {
list(req, res) {
return Student
.findAll({
include: [{
model: Classroom,
as: 'classroom'
},{
model: Course,
as: 'courses'
}],
order: [
['createdAt', 'DESC'],
[{ model: Course, as: 'courses' }, 'createdAt', 'DESC'],
],
})
.then((students) => res.status(200).send(students))
.catch((error) => { res.status(400).send(error); });
},
getById(req, res) {
return Student
.findByPk(req.params.id, {
include: [{
model: Classroom,
as: 'classroom'
},{
model: Course,
as: 'courses'
}],
})
.then((student) => {
if (!student) {
return res.status(404).send({
message: 'Student Not Found',
});
}
return res.status(200).send(student);
})
.catch((error) => res.status(400).send(error));
},
add(req, res) {
return Student
.create({
classroom_id: req.body.classroom_id,
student_name: req.body.student_name,
})
.then((student) => res.status(201).send(student))
.catch((error) => res.status(400).send(error));
},
update(req, res) {
return Student
.findByPk(req.params.id, {
include: [{
model: Classroom,
as: 'classroom'
},{
model: Course,
as: 'courses'
}],
})
.then(student => {
if (!student) {
return res.status(404).send({
message: 'Student Not Found',
});
}
return student
.update({
student_name: req.body.student_name || student.student_name,
})
.then(() => res.status(200).send(student))
.catch((error) => res.status(400).send(error));
})
.catch((error) => res.status(400).send(error));
},
delete(req, res) {
return Student
.findByPk(req.params.id)
.then(student => {
if (!student) {
return res.status(404).send({
message: 'Student Not Found',
});
}
return student
.destroy()
.then(() => res.status(204).send())
.catch((error) => res.status(400).send(error));
})
.catch((error) => res.status(400).send(error));
},
};
Open and edit `controllers/index.js` then register the Student controller in that file.
const classroom = require('./classroom');
const student = require('./student');
module.exports = {
classroom,
student,
};
Next, open and edit `routes/index.js` then add a required variable for the student controller.
const studentController = require('../controllers').student;
Add the routes for all CRUD functions of the student controller.
router.get('/api/student', studentController.list);
router.get('/api/student/:id', studentController.getById);
router.post('/api/student', studentController.add);
router.put('/api/student/:id', studentController.update);
router.delete('/api/student/:id', studentController.delete);
Next, create the Lecturer controller file.
touch controllers/lecturer.js
Open and edit `controllers/lecturer.js` then add these lines of code that contain the full CRUD functions for the Lecturer model.
const Lecturer = require('../models').Lecturer;
const Course = require('../models').Course;
module.exports = {
list(req, res) {
return Lecturer
.findAll({
include: [{
model: Course,
as: 'course'
}],
order: [
['createdAt', 'DESC'],
[{ model: Course, as: 'course' }, 'createdAt', 'DESC'],
],
})
.then((lecturers) => res.status(200).send(lecturers))
.catch((error) => { res.status(400).send(error); });
},
getById(req, res) {
return Lecturer
.findByPk(req.params.id, {
include: [{
model: Course,
as: 'course'
}],
})
.then((lecturer) => {
if (!lecturer) {
return res.status(404).send({
message: 'Lecturer Not Found',
});
}
return res.status(200).send(lecturer);
})
.catch((error) => res.status(400).send(error));
},
add(req, res) {
return Lecturer
.create({
lecturer_name: req.body.lecturer_name,
})
.then((lecturer) => res.status(201).send(lecturer))
.catch((error) => res.status(400).send(error));
},
update(req, res) {
return Lecturer
.findByPk(req.params.id, {
include: [{
model: Course,
as: 'course'
}],
})
.then(lecturer => {
if (!lecturer) {
return res.status(404).send({
message: 'Lecturer Not Found',
});
}
return lecturer
.update({
lecturer_name: req.body.lecturer_name || lecturer.lecturer_name,
})
.then(() => res.status(200).send(lecturer))
.catch((error) => res.status(400).send(error));
})
.catch((error) => res.status(400).send(error));
},
delete(req, res) {
return Lecturer
.findByPk(req.params.id)
.then(lecturer => {
if (!lecturer) {
return res.status(404).send({
message: 'Lecturer Not Found',
});
}
return lecturer
.destroy()
.then(() => res.status(204).send())
.catch((error) => res.status(400).send(error));
})
.catch((error) => res.status(400).send(error));
},
};
Next, open and edit `controllers/index.js` then register the Lecturer controller in that file.
const classroom = require('./classroom');
const student = require('./student');
const lecturer = require('./lecturer');
module.exports = {
classroom,
student,
lecturer,
};
Next, open and edit `routes/index.js` then add a required variable for the lecturer controller.
const lecturerController = require('../controllers').lecturer;
Add the routes for all CRUD functions of the lecturer controller.
router.get('/api/lecturer', lecturerController.list);
router.get('/api/lecturer/:id', lecturerController.getById);
router.post('/api/lecturer', lecturerController.add);
router.put('/api/lecturer/:id', lecturerController.update);
router.delete('/api/lecturer/:id', lecturerController.delete);
Next, create the Course controller file.
touch controllers/course.js
Open and edit `controllers/course.js` then add these lines of code that contain the full CRUD functions for the Course model.
const Course = require('../models').Course;
const Student = require('../models').Student;
const Lecturer = require('../models').Lecturer;
module.exports = {
list(req, res) {
return Course
.findAll({
include: [{
model: Student,
as: 'students'
},{
model: Lecturer,
as: 'lecturer'
}],
order: [
['createdAt', 'DESC'],
[{ model: Student, as: 'students' }, 'createdAt', 'DESC'],
],
})
.then((courses) => res.status(200).send(courses))
.catch((error) => { res.status(400).send(error); });
},
getById(req, res) {
return Course
.findByPk(req.params.id, {
include: [{
model: Student,
as: 'students'
},{
model: Lecturer,
as: 'lecturer'
}],
})
.then((course) => {
if (!course) {
return res.status(404).send({
message: 'Course Not Found',
});
}
return res.status(200).send(course);
})
.catch((error) => res.status(400).send(error));
},
add(req, res) {
return Course
.create({
course_name: req.body.course_name,
})
.then((course) => res.status(201).send(course))
.catch((error) => res.status(400).send(error));
},
update(req, res) {
return Course
.findByPk(req.params.id, {
include: [{
model: Student,
as: 'students'
},{
model: Lecturer,
as: 'lecturer'
}],
})
.then(course => {
if (!course) {
return res.status(404).send({
message: 'Course Not Found',
});
}
return course
.update({
course_name: req.body.course_name || course.course_name,
})
.then(() => res.status(200).send(course))
.catch((error) => res.status(400).send(error));
})
.catch((error) => res.status(400).send(error));
},
delete(req, res) {
return Course
.findByPk(req.params.id)
.then(course => {
if (!course) {
return res.status(404).send({
message: 'Course Not Found',
});
}
return course
.destroy()
.then(() => res.status(204).send())
.catch((error) => res.status(400).send(error));
})
.catch((error) => res.status(400).send(error));
},
};
Next, open and edit `controllers/index.js` then register the Course controller in that file.
const classroom = require('./classroom');
const student = require('./student');
const lecturer = require('./lecturer');
const course = require('./course');
module.exports = {
classroom,
student,
lecturer,
course,
};
Next, open and edit `routes/index.js` then add a required variable for the course controller.
const courseController = require('../controllers').course;
Add the routes for all CRUD functions of the course controller.
router.get('/api/course', courseController.list);
router.get('/api/course/:id', courseController.getById);
router.post('/api/course', courseController.add);
router.put('/api/course/:id', courseController.update);
router.delete('/api/course/:id', courseController.delete);
To create a Classroom together with its students, add this function to `controllers/classroom.js`.
addWithStudents(req, res) {
return Classroom
.create({
class_name: req.body.class_name,
students: req.body.students,
}, {
include: [{
model: Student,
as: 'students'
}]
})
.then((classroom) => res.status(201).send(classroom))
.catch((error) => res.status(400).send(error));
},
Next, add this new function to the route file `routes/index.js`.
router.post('/api/classroom/add_with_students', classroomController.addWithStudents);
To create a Lecturer together with a Course, add this function to `controllers/lecturer.js`.
addWithCourse(req, res) {
return Lecturer
.create({
lecturer_name: req.body.lecturer_name,
course: req.body.course
}, {
include: [{
model: Course,
as: 'course'
}]
})
.then((lecturer) => res.status(201).send(lecturer))
.catch((error) => res.status(400).send(error));
},
Next, add this new function to the route file `routes/index.js`.
router.post('/api/lecturer/add_with_course', lecturerController.addWithCourse);
To add a course for a student, add this function to `controllers/student.js`.
addCourse(req, res) {
return Student
.findByPk(req.body.student_id, {
include: [{
model: Classroom,
as: 'classroom'
},{
model: Course,
as: 'courses'
}],
})
.then((student) => {
if (!student) {
return res.status(404).send({
message: 'Student Not Found',
});
}
return Course.findByPk(req.body.course_id).then((course) => {
if (!course) {
return res.status(404).send({
message: 'Course Not Found',
});
}
// Wait for the association row to be saved before responding.
return student.addCourse(course)
.then(() => res.status(200).send(student));
})
})
.catch((error) => res.status(400).send(error));
},
Next, add this new function to the route file `routes/index.js`.
router.post('/api/student/add_course', studentController.addCourse);
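If you prefer, the nested `then` calls in `addCourse` can be flattened with async/await; this sketch has the same behavior as the handler above.
async addCourse(req, res) {
  try {
    const student = await Student.findByPk(req.body.student_id, {
      include: [
        { model: Classroom, as: 'classroom' },
        { model: Course, as: 'courses' },
      ],
    });
    if (!student) {
      return res.status(404).send({ message: 'Student Not Found' });
    }
    const course = await Course.findByPk(req.body.course_id);
    if (!course) {
      return res.status(404).send({ message: 'Course Not Found' });
    }
    // Persist the row in the StudentCourse join table before responding.
    await student.addCourse(course);
    return res.status(200).send(student);
  } catch (error) {
    return res.status(400).send(error);
  }
},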
Type this command to run the application (if you don't have nodemon yet, install it first with `npm install -g nodemon`).
nodemon
Open a new terminal or command-line tab, then type this command to save classroom data together with its students.
curl -i -X POST -H "Content-Type: application/json" -d '{ "class_name":"Class A","students": [{ "student_name":"John Doe" },{ "student_name":"Jane Doe" },{ "student_name":"Doe Doel" }] }' localhost:3000/api/classroom/add_with_students
To see the data persisted to the PostgreSQL tables, open a new terminal tab then run `psql`.
psql postgres -U djamware
Connect to the database, then run the queries.
postgres=> \c node_sequelize
node_sequelize=> SELECT * FROM public."Classrooms";
id | class_name | createdAt | updatedAt
----+------------+----------------------------+----------------------------
2 | Class A | 2018-07-24 09:18:30.062+07 | 2018-07-24 09:18:30.062+07
(1 row)
node_sequelize=> SELECT * FROM public."Students" WHERE classroom_id=2;
id | classroom_id | student_name | createdAt | updatedAt
----+--------------+--------------+----------------------------+----------------------------
1 | 2 | John Doe | 2018-07-24 09:18:30.125+07 | 2018-07-24 09:18:30.125+07
2 | 2 | Jane Doe | 2018-07-24 09:18:30.125+07 | 2018-07-24 09:18:30.125+07
3 | 2 | Doe Doel | 2018-07-24 09:18:30.125+07 | 2018-07-24 09:18:30.125+07
(3 rows)
Using `curl`, simply get a classroom and the students will be included in the response.
curl -i -H "Accept: application/json" localhost:3000/api/classroom/2
HTTP/1.1 200 OK
X-Powered-By: Express
Content-Type: application/json; charset=utf-8
Content-Length: 512
ETag: W/"200-9RPafOJtDdkqqMBVkSNCFoQ3p9s"
Date: Tue, 24 Jul 2018 03:18:45 GMT
Connection: keep-alive
{"id":2,"class_name":"Class A","createdAt":"2018-07-24T02:18:30.062Z","updatedAt":"2018-07-24T02:18:30.062Z","students":[{"id":1,"classroom_id":2,"student_name":"John Doe","createdAt":"2018-07-24T02:18:30.125Z","updatedAt":"2018-07-24T02:18:30.125Z"},{"id":2,"classroom_id":2,"student_name":"Jane Doe","createdAt":"2018-07-24T02:18:30.125Z","updatedAt":"2018-07-24T02:18:30.125Z"},{"id":3,"classroom_id":2,"student_name":"Doe Doel","createdAt":"2018-07-24T02:18:30.125Z","updatedAt":"2018-07-24T02:18:30.125Z"}]}
Run these `curl` commands to save the Lecturer, Course, and Student/Course data.
curl -i -X POST -H "Content-Type: application/json" -d '{ "lecturer_name":"Kylian Mbappe","course": { "course_name":"English Grammar" }}' localhost:3000/api/lecturer/add_with_course
curl -i -X POST -H "Content-Type: application/json" -d '{ "student_id":1,"course_id": 1}' localhost:3000/api/student/add_course
Originally published by Didin J at djamware
Happy Coding!!!
#node-js #express #postgresql #javascript
As more and more data is exposed via APIs either as API-first companies or for the explosion of single page apps/JAMStack, API security can no longer be an afterthought. The hard part about APIs is that it provides direct access to large amounts of data while bypassing browser precautions. Instead of worrying about SQL injection and XSS issues, you should be concerned about the bad actor who was able to paginate through all your customer records and their data.
Typical prevention mechanisms like Captchas and browser fingerprinting won’t work since APIs by design need to handle a very large number of API accesses even by a single customer. So where do you start? The first thing is to put yourself in the shoes of a hacker and then instrument your APIs to detect and block common attacks along with unknown unknowns for zero-day exploits. Some of these are on the OWASP Security API list, but not all.
Most APIs provide access to resources that are lists of entities such as /users or /widgets. A client such as a browser would typically filter and paginate through this list to limit the number of items returned to a client like so:
First Call: GET /items?skip=0&take=10
Second Call: GET /items?skip=10&take=10
However, if that entity has any PII or other information, then a hacker could scrape that endpoint to get a dump of all entities in your database. This could be most dangerous if those entities accidentally exposed PII or other sensitive information, but it could also be dangerous in providing competitors or others with adoption and usage stats for your business, or in providing scammers with a way to get large email lists. See how Venmo data was scraped.
A naive protection mechanism would be to check the take count and throw an error if it is greater than 100 or 1000. The problem with this is two-fold: it can break legitimate clients that genuinely need large pages, and it does not actually stop scraping, since a hacker can still walk the entire collection in small pages, as in this script:
import sys
import requests
from random import randint
from time import sleep

skip = 0
while True:
    response = requests.get('https://api.acmeinc.com/widgets?take=10&skip=' + str(skip),
                            headers={'Authorization': 'Bearer ' + sys.argv[1]})
    print("Fetched 10 items")
    sleep(randint(100, 1000))  # pause between requests
    skip += 10
To secure against pagination attacks, you should track how many items of a single resource are accessed within a certain time period for each user or API key rather than just at the request level. By tracking API resource access at the user level, you can block a user or API key once they hit a threshold such as "touched 1,000,000 items in a one-hour period". This is dependent on your API use case and can even be dependent on their subscription with you. Like a Captcha, this slows down the rate at which a hacker can exploit your API, since they would have to manually create a new user account to obtain a new API key.
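As an illustration, here is a minimal in-memory sketch of per-key item tracking (the one-hour window and 1,000,000-item threshold just mirror the example above; a real deployment would keep these counters in a shared store such as Redis):
// Track how many items each API key has touched in the current window (sketch).
const WINDOW_MS = 60 * 60 * 1000; // one hour
const MAX_ITEMS = 1000000;        // example threshold per key per window
const usage = new Map();          // apiKey -> { count, windowStart }
function trackItemAccess(apiKey, itemCount) {
  const now = Date.now();
  const entry = usage.get(apiKey) || { count: 0, windowStart: now };
  if (now - entry.windowStart > WINDOW_MS) {
    entry.count = 0; // start a new window
    entry.windowStart = now;
  }
  entry.count += itemCount;
  usage.set(apiKey, entry);
  return entry.count <= MAX_ITEMS; // false means this key should be blocked
}
A list endpoint would call trackItemAccess(key, results.length) before responding and block or throttle the key once this returns false.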
Most APIs are protected by some sort of API key or JWT (JSON Web Token). This provides a natural way to track and protect your API as API security tools can detect abnormal API behavior and block access to an API key automatically. However, hackers will want to outsmart these mechanisms by generating and using a large pool of API keys from a large number of users just like a web hacker would use a large pool of IP addresses to circumvent DDoS protection.
The easiest way to secure against these types of attacks is by requiring a human to sign up for your service and generate API keys. Bot traffic can be prevented with things like Captcha and 2-Factor Authentication. Unless there is a legitimate business case, new users who sign up for your service should not have the ability to generate API keys programmatically. Instead, only trusted customers should have the ability to generate API keys programmatically. Go one step further and ensure any anomaly detection for abnormal behavior is done at the user and account level, not just for each API key.
APIs are used in ways that increase the probability that credentials are leaked: keys end up in scripts and environment files, get committed to source control, or get pasted into CURL commands and GitHub issues.
If a key is exposed due to user error, one may think that you, as the API provider, are not to blame. However, security is all about reducing surface area and risk. Treat your customer data as if it's your own and help customers by adding guards that prevent accidental key exposure.
The easiest way to prevent key exposure is by leveraging two tokens rather than one. A refresh token is stored as an environment variable and can only be used to generate short lived access tokens. Unlike the refresh token, these short lived tokens can access the resources, but are time limited such as in hours or days.
The customer will store the refresh token with their other API keys. Then your SDK will generate access tokens on SDK init or when the last access token expires. If a CURL command gets pasted into a GitHub issue, a hacker would need to use it within hours, reducing the attack vector (unless it was the actual refresh token, which is low probability).
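A minimal sketch of the two-token exchange using Express and the jsonwebtoken package (the endpoint path, the lookupRefreshToken helper, and the one-hour expiry are assumptions for illustration):
const express = require('express');
const jwt = require('jsonwebtoken');
const app = express();
app.use(express.json());
// Exchange a long-lived refresh token for a short-lived access token.
app.post('/oauth/token', (req, res) => {
  const record = lookupRefreshToken(req.body.refresh_token); // hypothetical DB lookup
  if (!record) {
    return res.status(401).send({ message: 'Invalid refresh token' });
  }
  // Only this short-lived token can access resource endpoints.
  const accessToken = jwt.sign({ sub: record.userId }, process.env.JWT_SECRET, {
    expiresIn: '1h',
  });
  res.send({ access_token: accessToken, expires_in: 3600 });
});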
APIs open up entirely new business models where customers can access your API platform programmatically. However, this can make DDoS protection tricky. Most DDoS protection is designed to absorb and reject a large number of requests from bad actors during DDoS attacks but still need to let the good ones through. This requires fingerprinting the HTTP requests to check against what looks like bot traffic. This is much harder for API products as all traffic looks like bot traffic and is not coming from a browser where things like cookies are present.
The magical part about APIs is that almost every access requires an API key. If a request doesn't have an API key, you can automatically reject it, which is lightweight on your servers (ensure authentication is short-circuited very early, before later middleware like request JSON parsing). So then how do you handle authenticated requests? The easiest is to leverage rate-limit counters for each API key, such as handling X requests per minute and rejecting those above the threshold with a 429 HTTP response.
There are a variety of algorithms to do this, such as leaky bucket and fixed-window counters.
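For example, a fixed-window counter per API key fits in a few lines of Express middleware (the limit and header handling are assumptions; production APIs often use a library such as express-rate-limit or a Redis-backed counter instead):
// Fixed-window rate limiter: at most LIMIT requests per key per minute (sketch).
const LIMIT = 100;
const counters = new Map(); // apiKey -> { count, windowStart }
function rateLimit(req, res, next) {
  const apiKey = req.get('Authorization');
  if (!apiKey) return res.status(401).send({ message: 'API key required' }); // reject early
  const now = Date.now();
  const entry = counters.get(apiKey) || { count: 0, windowStart: now };
  if (now - entry.windowStart > 60000) {
    entry.count = 0; // new one-minute window
    entry.windowStart = now;
  }
  entry.count += 1;
  counters.set(apiKey, entry);
  if (entry.count > LIMIT) return res.status(429).send({ message: 'Too Many Requests' });
  next();
}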
APIs are no different than web servers when it comes to good server hygiene. Data can be leaked due to a misconfigured SSL certificate or by allowing non-HTTPS traffic. For modern applications, there is very little reason to accept non-HTTPS requests, but a customer could mistakenly issue a non-HTTPS request from their application or CURL, exposing the API key. APIs do not have the protection of a browser, so things like HSTS or redirect-to-HTTPS offer no protection.
Test your SSL implementation with the Qualys SSL Test or a similar tool. You should also block all non-HTTPS requests, which can be done within your load balancer. You should also remove any HTTP headers and scrub any error messages that leak implementation details. If your API is used only by your own apps or can only be accessed server-side, then review the Authoritative guide to Cross-Origin Resource Sharing for REST APIs.
APIs provide access to dynamic data that’s scoped to each API key. Any caching implementation should have the ability to scope to an API key to prevent cross-pollution. Even if you don’t cache anything in your infrastructure, you could expose your customers to security holes. If a customer with a proxy server was using multiple API keys such as one for development and one for production, then they could see cross-pollinated data.
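In practice this means the API key must be part of every cache key. A small sketch of the idea (names are illustrative):
// Scope cache entries to the API key so tenants never see each other's data.
function cacheKeyFor(req) {
  const apiKey = req.get('Authorization') || 'anonymous';
  return apiKey + ':' + req.method + ':' + req.originalUrl;
}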
#api management #api security #api best practices #api providers #security analytics #api management policies #api access tokens #api access #api security risks #api access keys
Not babashka. Node.js babashka!?
Ad-hoc CLJS scripting on Node.js.
Experimental. Please report issues here.
Nbb's main goal is to make it easy to get started with ad hoc CLJS scripting on Node.js.
Additional goals and features, covered below, include first-class macros, fast startup, and support for building small TUI apps with Reagent.
Nbb requires Node.js v12 or newer.
CLJS code is evaluated through SCI, the same interpreter that powers babashka. Because SCI works with advanced compilation, the bundle size, especially when combined with other dependencies, is smaller than what you get with self-hosted CLJS. That makes startup faster. The trade-off is that execution is less performant and that only a subset of CLJS is available (e.g. no deftype, yet).
Install nbb from NPM:
$ npm install nbb -g
Omit -g for a local install.
Try out an expression:
$ nbb -e '(+ 1 2 3)'
6
And then install some other NPM libraries to use in the script. E.g.:
$ npm install csv-parse shelljs zx
Create a script which uses the NPM libraries:
(ns script
(:require ["csv-parse/lib/sync$default" :as csv-parse]
["fs" :as fs]
["path" :as path]
["shelljs$default" :as sh]
["term-size$default" :as term-size]
["zx$default" :as zx]
["zx$fs" :as zxfs]
[nbb.core :refer [*file*]]))
(prn (path/resolve "."))
(prn (term-size))
(println (count (str (fs/readFileSync *file*))))
(prn (sh/ls "."))
(prn (csv-parse "foo,bar"))
(prn (zxfs/existsSync *file*))
(zx/$ #js ["ls"])
Call the script:
$ nbb script.cljs
"/private/tmp/test-script"
#js {:columns 216, :rows 47}
510
#js ["node_modules" "package-lock.json" "package.json" "script.cljs"]
#js [#js ["foo" "bar"]]
true
$ ls
node_modules
package-lock.json
package.json
script.cljs
Nbb has first class support for macros: you can define them right inside your .cljs file, like you are used to from JVM Clojure. Consider the plet macro to make working with promises more palatable:
(defmacro plet
[bindings & body]
(let [binding-pairs (reverse (partition 2 bindings))
body (cons 'do body)]
(reduce (fn [body [sym expr]]
(let [expr (list '.resolve 'js/Promise expr)]
(list '.then expr (list 'clojure.core/fn (vector sym)
body))))
body
binding-pairs)))
Using this macro we can make async code look more like sync code. Consider this puppeteer example:
(-> (.launch puppeteer)
(.then (fn [browser]
(-> (.newPage browser)
(.then (fn [page]
(-> (.goto page "https://clojure.org")
(.then #(.screenshot page #js{:path "screenshot.png"}))
(.catch #(js/console.log %))
(.then #(.close browser)))))))))
Using plet this becomes:
(plet [browser (.launch puppeteer)
page (.newPage browser)
_ (.goto page "https://clojure.org")
_ (-> (.screenshot page #js{:path "screenshot.png"})
(.catch #(js/console.log %)))]
(.close browser))
See the puppeteer example for the full code.
Since v0.0.36, nbb includes promesa which is a library to deal with promises. The above plet macro is similar to promesa.core/let.
$ time nbb -e '(+ 1 2 3)'
6
nbb -e '(+ 1 2 3)' 0.17s user 0.02s system 109% cpu 0.168 total
The baseline startup time for a script is about 170ms on my laptop. When invoked via npx this adds another 300ms or so, so for faster startup, either use a globally installed nbb or use $(npm bin)/nbb script.cljs to bypass npx.
Nbb does not depend on any NPM dependencies. All NPM libraries loaded by a script are resolved relative to that script. When using the Reagent module, React is resolved in the same way as any other NPM library.
To load .cljs files from local paths or dependencies, you can use the --classpath argument. The current dir is added to the classpath automatically. So if there is a file foo/bar.cljs relative to your current dir, then you can load it via (:require [foo.bar :as fb]). Note that nbb uses the same naming conventions for namespaces and directories as other Clojure tools: foo-bar in the namespace name becomes foo_bar in the directory name.
To load dependencies from the Clojure ecosystem, you can use the Clojure CLI or babashka to download them and produce a classpath:
$ classpath="$(clojure -A:nbb -Spath -Sdeps '{:aliases {:nbb {:replace-deps {com.github.seancorfield/honeysql {:git/tag "v2.0.0-rc5" :git/sha "01c3a55"}}}}}')"
and then feed it to the --classpath argument:
$ nbb --classpath "$classpath" -e "(require '[honey.sql :as sql]) (sql/format {:select :foo :from :bar :where [:= :baz 2]})"
["SELECT foo FROM bar WHERE baz = ?" 2]
Currently nbb only reads from directories, not jar files, so you are encouraged to use git libs. Support for .jar files will be added later.
The name of the file that is currently being executed is available via nbb.core/*file* or on the metadata of vars:
(ns foo
(:require [nbb.core :refer [*file*]]))
(prn *file*) ;; "/private/tmp/foo.cljs"
(defn f [])
(prn (:file (meta #'f))) ;; "/private/tmp/foo.cljs"
Nbb includes reagent.core which will be lazily loaded when required. You can use this together with ink to create a TUI application:
$ npm install ink
ink-demo.cljs:
(ns ink-demo
(:require ["ink" :refer [render Text]]
[reagent.core :as r]))
(defonce state (r/atom 0))
(doseq [n (range 1 11)]
(js/setTimeout #(swap! state inc) (* n 500)))
(defn hello []
[:> Text {:color "green"} "Hello, world! " @state])
(render (r/as-element [hello]))
Working with callbacks and promises can become tedious. Since nbb v0.0.36 the promesa.core namespace is included, with the let and do! macros. An example:
(ns prom
(:require [promesa.core :as p]))
(defn sleep [ms]
(js/Promise.
(fn [resolve _]
(js/setTimeout resolve ms))))
(defn do-stuff
[]
(p/do!
(println "Doing stuff which takes a while")
(sleep 1000)
1))
(p/let [a (do-stuff)
b (inc a)
c (do-stuff)
d (+ b c)]
(prn d))
$ nbb prom.cljs
Doing stuff which takes a while
Doing stuff which takes a while
3
Also see API docs.
Since nbb v0.0.75 applied-science/js-interop is available:
(ns example
(:require [applied-science.js-interop :as j]))
(def o (j/lit {:a 1 :b 2 :c {:d 1}}))
(prn (j/select-keys o [:a :b])) ;; #js {:a 1, :b 2}
(prn (j/get-in o [:c :d])) ;; 1
Most of this library is supported in nbb, except the following:
- :syms
- .-x notation. In nbb, you must use keywords.
See the example of what is currently supported.
See the examples directory for small examples.
Also check out these projects built with nbb:
See API documentation.
See this gist on how to convert an nbb script or project to shadow-cljs.
Prerequisites:
To build:
bb release
Run bb tasks for more project-related tasks.
Download Details:
Author: borkdude
Download Link: Download The Source Code
Official Website: https://github.com/borkdude/nbb
License: EPL-1.0
#node #javascript
Perl script converts PDF files to Gerber format
Pdf2Gerb generates Gerber 274X photoplotting and Excellon drill files from PDFs of a PCB. Up to three PDFs are used: the top copper layer, the bottom copper layer (for 2-sided PCBs), and an optional silk screen layer. The PDFs can be created directly from any PDF drawing software, or a PDF print driver can be used to capture the Print output if the drawing software does not directly support output to PDF.
The general workflow is as follows: design the PCB and export (or print) each layer to PDF, run Pdf2Gerb on those PDFs to generate the Gerber and Excellon files, check the output in a Gerber viewer, and then submit the files to your PCB manufacturer.
Please note that Pdf2Gerb does NOT perform DRC (Design Rule Checks), as these will vary according to individual PCB manufacturer conventions and capabilities. Also note that Pdf2Gerb is not perfect, so the output files must always be checked before submitting them. As of version 1.6, Pdf2Gerb supports most PCB elements, such as round and square pads, round holes, traces, SMD pads, ground planes, no-fill areas, and panelization. However, because it interprets the graphical output of a Print function, there are limitations in what it can recognize (or there may be bugs).
See docs/Pdf2Gerb.pdf for install/setup, config, usage, and other info.
#Pdf2Gerb config settings:
#Put this file in same folder/directory as pdf2gerb.pl itself (global settings),
#or copy to another folder/directory with PDFs if you want PCB-specific settings.
#There is only one user of this file, so we don't need a custom package or namespace.
#NOTE: all constants defined in here will be added to main namespace.
#package pdf2gerb_cfg;
use strict; #trap undef vars (easier debug)
use warnings; #other useful info (easier debug)
##############################################################################################
#configurable settings:
#change values here instead of in main pdf2gerb.pl file
use constant WANT_COLORS => ($^O !~ m/Win/); #ANSI colors no worky on Windows? this must be set < first DebugPrint() call
#just a little warning; set realistic expectations:
#DebugPrint("${\(CYAN)}Pdf2Gerb.pl ${\(VERSION)}, $^O O/S\n${\(YELLOW)}${\(BOLD)}${\(ITALIC)}This is EXPERIMENTAL software. \nGerber files MAY CONTAIN ERRORS. Please CHECK them before fabrication!${\(RESET)}", 0); #if WANT_DEBUG
use constant METRIC => FALSE; #set to TRUE for metric units (only affect final numbers in output files, not internal arithmetic)
use constant APERTURE_LIMIT => 0; #34; #max #apertures to use; generate warnings if too many apertures are used (0 to not check)
use constant DRILL_FMT => '2.4'; #'2.3'; #'2.4' is the default for PCB fab; change to '2.3' for CNC
use constant WANT_DEBUG => 0; #10; #level of debug wanted; higher == more, lower == less, 0 == none
use constant GERBER_DEBUG => 0; #level of debug to include in Gerber file; DON'T USE FOR FABRICATION
use constant WANT_STREAMS => FALSE; #TRUE; #save decompressed streams to files (for debug)
use constant WANT_ALLINPUT => FALSE; #TRUE; #save entire input stream (for debug ONLY)
#DebugPrint(sprintf("${\(CYAN)}DEBUG: stdout %d, gerber %d, want streams? %d, all input? %d, O/S: $^O, Perl: $]${\(RESET)}\n", WANT_DEBUG, GERBER_DEBUG, WANT_STREAMS, WANT_ALLINPUT), 1);
#DebugPrint(sprintf("max int = %d, min int = %d\n", MAXINT, MININT), 1);
#define standard trace and pad sizes to reduce scaling or PDF rendering errors:
#This avoids weird aperture settings and replaces them with more standardized values.
#(I'm not sure how photoplotters handle strange sizes).
#Fewer choices here gives more accurate mapping in the final Gerber files.
#units are in inches
use constant TOOL_SIZES => #add more as desired
(
#round or square pads (> 0) and drills (< 0):
.010, -.001, #tiny pads for SMD; dummy drill size (too small for practical use, but needed so StandardTool will use this entry)
.031, -.014, #used for vias
.041, -.020, #smallest non-filled plated hole
.051, -.025,
.056, -.029, #useful for IC pins
.070, -.033,
.075, -.040, #heavier leads
# .090, -.043, #NOTE: 600 dpi is not high enough resolution to reliably distinguish between .043" and .046", so choose 1 of the 2 here
.100, -.046,
.115, -.052,
.130, -.061,
.140, -.067,
.150, -.079,
.175, -.088,
.190, -.093,
.200, -.100,
.220, -.110,
.160, -.125, #useful for mounting holes
#some additional pad sizes without holes (repeat a previous hole size if you just want the pad size):
.090, -.040, #want a .090 pad option, but use dummy hole size
.065, -.040, #.065 x .065 rect pad
.035, -.040, #.035 x .065 rect pad
#traces:
.001, #too thin for real traces; use only for board outlines
.006, #minimum real trace width; mainly used for text
.008, #mainly used for mid-sized text, not traces
.010, #minimum recommended trace width for low-current signals
.012,
.015, #moderate low-voltage current
.020, #heavier trace for power, ground (even if a lighter one is adequate)
.025,
.030, #heavy-current traces; be careful with these ones!
.040,
.050,
.060,
.080,
.100,
.120,
);
#Areas larger than the values below will be filled with parallel lines:
#This cuts down on the number of aperture sizes used.
#Set to 0 to always use an aperture or drill, regardless of size.
use constant { MAX_APERTURE => max((TOOL_SIZES)) + .004, MAX_DRILL => -min((TOOL_SIZES)) + .004 }; #max aperture and drill sizes (plus a little tolerance)
#DebugPrint(sprintf("using %d standard tool sizes: %s, max aper %.3f, max drill %.3f\n", scalar((TOOL_SIZES)), join(", ", (TOOL_SIZES)), MAX_APERTURE, MAX_DRILL), 1);
#NOTE: Compare the PDF to the original CAD file to check the accuracy of the PDF rendering and parsing!
#for example, the CAD software I used generated the following circles for holes:
#CAD hole size: parsed PDF diameter: error:
# .014 .016 +.002
# .020 .02267 +.00267
# .025 .026 +.001
# .029 .03167 +.00267
# .033 .036 +.003
# .040 .04267 +.00267
#This was usually ~ .002" - .003" too big compared to the hole as displayed in the CAD software.
#To compensate for PDF rendering errors (either during CAD Print function or PDF parsing logic), adjust the values below as needed.
#units are pixels; for example, a value of 2.4 at 600 dpi = .004 inch, 2 at 600 dpi = .0033"
use constant
{
HOLE_ADJUST => -0.004 * 600, #-2.6, #holes seemed to be slightly oversized (by .002" - .004"), so shrink them a little
RNDPAD_ADJUST => -0.003 * 600, #-2, #-2.4, #round pads seemed to be slightly oversized, so shrink them a little
SQRPAD_ADJUST => +0.001 * 600, #+.5, #square pads are sometimes too small by .00067, so bump them up a little
RECTPAD_ADJUST => 0, #(pixels) rectangular pads seem to be okay? (not tested much)
TRACE_ADJUST => 0, #(pixels) traces seemed to be okay?
REDUCE_TOLERANCE => .001, #(inches) allow this much variation when reducing circles and rects
};
#Also, my CAD's Print function or the PDF print driver I used was a little off for circles, so define some additional adjustment values here:
#Values are added to X/Y coordinates; units are pixels; for example, a value of 1 at 600 dpi would be ~= .002 inch
use constant
{
CIRCLE_ADJUST_MINX => 0,
CIRCLE_ADJUST_MINY => -0.001 * 600, #-1, #circles were a little too high, so nudge them a little lower
CIRCLE_ADJUST_MAXX => +0.001 * 600, #+1, #circles were a little too far to the left, so nudge them a little to the right
CIRCLE_ADJUST_MAXY => 0,
SUBST_CIRCLE_CLIPRECT => FALSE, #generate circle and substitute for clip rects (to compensate for the way some CAD software draws circles)
WANT_CLIPRECT => TRUE, #FALSE, #AI doesn't need clip rect at all? should be on normally?
RECT_COMPLETION => FALSE, #TRUE, #fill in 4th side of rect when 3 sides found
};
#allow .012 clearance around pads for solder mask:
#This value effectively adjusts pad sizes in the TOOL_SIZES list above (only for solder mask layers).
use constant SOLDER_MARGIN => +.012; #units are inches
#line join/cap styles:
use constant
{
CAP_NONE => 0, #butt (none); line is exact length
CAP_ROUND => 1, #round cap/join; line overhangs by a semi-circle at either end
CAP_SQUARE => 2, #square cap/join; line overhangs by a half square on either end
CAP_OVERRIDE => FALSE, #cap style overrides drawing logic
};
#number of elements in each shape type:
use constant
{
RECT_SHAPELEN => 6, #x0, y0, x1, y1, count, "rect" (start, end corners)
LINE_SHAPELEN => 6, #x0, y0, x1, y1, count, "line" (line seg)
CURVE_SHAPELEN => 10, #xstart, ystart, x0, y0, x1, y1, xend, yend, count, "curve" (bezier 2 points)
CIRCLE_SHAPELEN => 5, #x, y, radius, count, "circle" (center + radius)
};
#const my %SHAPELEN =
#Readonly my %SHAPELEN =>
our %SHAPELEN =
(
rect => RECT_SHAPELEN,
line => LINE_SHAPELEN,
curve => CURVE_SHAPELEN,
circle => CIRCLE_SHAPELEN,
);
#panelization:
#This will repeat the entire body the number of times indicated along the X or Y axes (files grow accordingly).
#Display elements that overhang PCB boundary can be squashed or left as-is (typically text or other silk screen markings).
#Set "overhangs" TRUE to allow overhangs, FALSE to truncate them.
#xpad and ypad allow margins to be added around outer edge of panelized PCB.
use constant PANELIZE => {'x' => 1, 'y' => 1, 'xpad' => 0, 'ypad' => 0, 'overhangs' => TRUE}; #number of times to repeat in X and Y directions
# Set this to 1 if you need TurboCAD support.
#$turboCAD = FALSE; #is this still needed as an option?
#CIRCAD pad generation uses an appropriate aperture, then moves it (stroke) "a little" - we use this to find pads and distinguish them from PCB holes.
use constant PAD_STROKE => 0.3; #0.0005 * 600; #units are pixels
#convert very short traces to pads or holes:
use constant TRACE_MINLEN => .001; #units are inches
#use constant ALWAYS_XY => TRUE; #FALSE; #force XY even if X or Y doesn't change; NOTE: needs to be TRUE for all pads to show in FlatCAM and ViewPlot
use constant REMOVE_POLARITY => FALSE; #TRUE; #set to remove subtractive (negative) polarity; NOTE: must be FALSE for ground planes
#PDF uses "points", each point = 1/72 inch
#combined with a PDF scale factor of .12, this gives 600 dpi resolution (72 / .12 = 600 dpi)
use constant INCHES_PER_POINT => 1/72; #0.0138888889; #multiply point-size by this to get inches
# The precision used when computing a bezier curve. Higher numbers are more precise but slower (and generate larger files).
#$bezierPrecision = 100;
use constant BEZIER_PRECISION => 36; #100; #use const; reduced for faster rendering (mainly used for silk screen and thermal pads)
# Ground planes and silk screen or larger copper rectangles or circles are filled line-by-line using this resolution.
use constant FILL_WIDTH => .01; #fill at most 0.01 inch at a time
# The max number of characters to read into memory
use constant MAX_BYTES => 10 * M; #bumped up to 10 MB, use const
use constant DUP_DRILL1 => TRUE; #FALSE; #kludge: ViewPlot doesn't load drill files that are too small so duplicate first tool
my $runtime = time(); #Time::HiRes::gettimeofday(); #measure my execution time
print STDERR "Loaded config settings from '${\(__FILE__)}'.\n";
1; #last value must be truthful to indicate successful load
#############################################################################################
#junk/experiment:
#use Package::Constants;
#use Exporter qw(import); #https://perldoc.perl.org/Exporter.html
#my $caller = "pdf2gerb::";
#sub cfg
#{
# my $proto = shift;
# my $class = ref($proto) || $proto;
# my $settings =
# {
# $WANT_DEBUG => 990, #10; #level of debug wanted; higher == more, lower == less, 0 == none
# };
# bless($settings, $class);
# return $settings;
#}
#use constant HELLO => "hi there2"; #"main::HELLO" => "hi there";
#use constant GOODBYE => 14; #"main::GOODBYE" => 12;
#print STDERR "read cfg file\n";
#our @EXPORT_OK = Package::Constants->list(__PACKAGE__); #https://www.perlmonks.org/?node_id=1072691; NOTE: "_OK" skips short/common names
#print STDERR scalar(@EXPORT_OK) . " consts exported:\n";
#foreach(@EXPORT_OK) { print STDERR "$_\n"; }
#my $val = main::thing("xyz");
#print STDERR "caller gave me $val\n";
#foreach my $arg (@ARGV) { print STDERR "arg $arg\n"; }
Author: swannman
Source Code: https://github.com/swannman/pdf2gerb
License: GPL-3.0 license
Front-end web development has been dominated by JavaScript for years. Google, Facebook, Wikipedia, and most other popular pages use JS for client-side functionality. More recently, it has also made a shift into cross-platform mobile development as a core technology in React Native, NativeScript, Apache Cordova, and other hybrid tools.
Over the last few years, Node.js has moved into backend development as well. Developers want to use the same tech stack for an entire web project without learning another language for server-side development. Node.js is a tool that brings JS functionality and syntax to the backend.
Node.js isn't a language, a library, or a framework. It's a runtime environment: normally JavaScript needs a browser to work, but Node.js provides the settings for JS to run outside of the browser. It's built on the V8 JavaScript engine, which can run in Chrome, other browsers, or standalone.
The purpose of V8 is to compile browser-oriented JS code into machine code, so JS becomes a general-purpose language that servers can understand. This is one of the advantages of using Node.js in web application development: it expands the usefulness of JavaScript, allowing developers to integrate the language with APIs, other languages, and external libraries.
Lately, companies have been actively switching their backend tech stacks to Node.js. LinkedIn picked Node.js over Ruby on Rails because it handled a growing workload better and reduced the number of servers several times over. PayPal and Netflix did something similar, except their goal was to move their architecture to microservices. Let's look at the reasons to pick Node.js for web application development, and at when you might plan to hire Node.js developers.
The first thing that makes Node.js a go-to environment for web development is its JavaScript legacy. JS is the most popular language right now, with thousands of free tools and an active community. Node.js, because of its connection to JS, quickly rose in popularity; it now has more than 368 million downloads and thousands of free tools in the npm registry.
Along with popularity, Node.js also inherited the fundamental benefits of JavaScript.
In addition, it's part of the popular MEAN tech stack (the combination of MongoDB, Express.js, Angular, and Node.js: four tools that handle all the vital parts of web application development).
This is perhaps the clearest advantage of Node.js web application development. JavaScript is a must for web development. Whether you build a multi-page or single-page application, you need to know JS well. If you are already comfortable with JavaScript, learning Node.js won't be a problem. Syntax, basic functionality, core principles: all of these are similar.
If you have JS developers on your team, it will be easier for them to learn JS-based Node than a completely new language. What's more, the front-end and back-end codebases will be very similar, easy to read, and easy to maintain, because they are both JS-based.
There’s another motivation behind why Node.js got famous so rapidly. The environment suits well the idea of microservice development (spilling stone monument usefulness into handfuls or many more modest administrations).
Microservices need to speak with one another rapidly — and Node.js is probably the quickest device in information handling. Among the fundamental Node.js benefits for programming development are its non-obstructing algorithms.
Node.js measures a few demands all at once without trusting that the first will be concluded. Many microservices can send messages to one another, and they will be gotten and addressed all the while.
Node.js was built with scalability in mind; its name actually says it. The environment allows multiple nodes to run simultaneously and communicate with each other. Here's why Node.js scalability is better than other web backend solutions.
Node.js has a cluster module that is responsible for load balancing across every running CPU core. This is one of many Node.js module benefits: you can run multiple nodes at once, and the environment will automatically balance the workload.
Node.js allows horizontal partitioning: you can split your application into multiple instances. You show different versions of the application to different users, based on their age, interests, location, language, and so on. This increases personalization and reduces workload. Node achieves this with child processes: operations that quickly communicate with each other and share the same origin.
What's more, Node's non-blocking request-handling system contributes to speed, letting applications process thousands of requests.
Many developers consider asynchrony to be both one of the drawbacks and one of the benefits of Node.js web application development. In Node, whenever a function executes, the code automatically sends a callback. As the number of functions grows, so does the number of callbacks, and you end up in a situation known as callback hell.
However, Node.js offers a way out. You can use language features and frameworks that schedule functions and organize callbacks, such as promises and async/await. They chain related steps automatically, so there is no need to dig through nested callbacks.
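To make the contrast concrete, here is a small sketch (the function names are invented for illustration) of nested callbacks versus the same flow written with async/await:
// Callback style: each step nests one level deeper ("callback hell").
getUser(id, (err, user) => {
  if (err) return handle(err);
  getOrders(user, (err, orders) => {
    if (err) return handle(err);
    sendReport(orders, (err) => {
      if (err) return handle(err);
    });
  });
});
// The same flow with promise-returning versions stays flat.
async function report(id) {
  const user = await getUserAsync(id);
  const orders = await getOrdersAsync(user);
  await sendReportAsync(orders);
}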
So, these are some of the top benefits of Node.js in web application development, and this is how Node.js contributes so much to the field.
I hope you now have a clear picture of why Node.js matters for your web project. If you are looking to hire a Node.js development company in India, I would suggest you also ask for a little consultancy whenever you call.
Good Luck!
#node.js development company in india #node js development company #hire node js developers #hire node.js developers in india #node.js development services #node.js development
If you look at the backend technology used by today's most popular apps, there is one thing you will find common among them: the use of Node.js. Yes, the Node.js framework is that effective and successful.
If you wish to have a strong backend for efficient app performance, then use Node.js on the backend.
WebClues Infotech offers professionals at different levels of experience and expertise for your app development needs. So hire a dedicated Node.js developer from WebClues Infotech who matches your experience and expertise requirements.
So what are you waiting for? Get your app developed with strong performance parameters from WebClues Infotech
For inquiry click here: https://www.webcluesinfotech.com/hire-nodejs-developer/
Book Free Interview: https://bit.ly/3dDShFg
#hire dedicated node.js developers #hire node.js developers #hire top dedicated node.js developers #hire node.js developers in usa & india #hire node js development company #hire the best node.js developers & programmers