A well tested and comprehensive Golang statistics library / package / module with no dependencies.
If you have any suggestions, problems or bug reports please create an issue and I'll do my best to accommodate you. In addition, simply starring the repo would show your support for the project and be very much appreciated!
go get github.com/montanaflynn/stats
All the functions can be seen in examples/main.go but here's a little taste:
// start with some source data to use
data := []float64{1.0, 2.1, 3.2, 4.823, 4.1, 5.8}
// you could also use different types like this
// data := stats.LoadRawData([]int{1, 2, 3, 4, 5})
// data := stats.LoadRawData([]interface{}{1.1, "2", 3})
// etc...
median, _ := stats.Median(data)
fmt.Println(median) // 3.65
roundedMedian, _ := stats.Round(median, 0)
fmt.Println(roundedMedian) // 4
The entire API documentation is available on GoDoc.org or pkg.go.dev.
You can also view docs offline with the following commands:
# Command line
godoc . # show all exported apis
godoc . Median # show a single function
godoc -ex . Round # show function with example
godoc . Float64Data # show the type and methods
# Local website
godoc -http=:4444 # start the godoc server on port 4444
open http://localhost:4444/pkg/github.com/montanaflynn/stats/
The exported API is as follows:
var (
ErrEmptyInput = statsError{"Input must not be empty."}
ErrNaN = statsError{"Not a number."}
ErrNegative = statsError{"Must not contain negative values."}
ErrZero = statsError{"Must not contain zero values."}
ErrBounds = statsError{"Input is outside of range."}
ErrSize = statsError{"Must be the same length."}
ErrInfValue = statsError{"Value is infinite."}
ErrYCoord = statsError{"Y Value must be greater than zero."}
)
func Round(input float64, places int) (rounded float64, err error) {}
type Float64Data []float64
func LoadRawData(raw interface{}) (f Float64Data) {}
func AutoCorrelation(data Float64Data, lags int) (float64, error) {}
func ChebyshevDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) {}
func Correlation(data1, data2 Float64Data) (float64, error) {}
func Covariance(data1, data2 Float64Data) (float64, error) {}
func CovariancePopulation(data1, data2 Float64Data) (float64, error) {}
func CumulativeSum(input Float64Data) ([]float64, error) {}
func Entropy(input Float64Data) (float64, error) {}
func EuclideanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) {}
func GeometricMean(input Float64Data) (float64, error) {}
func HarmonicMean(input Float64Data) (float64, error) {}
func InterQuartileRange(input Float64Data) (float64, error) {}
func ManhattanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) {}
func Max(input Float64Data) (max float64, err error) {}
func Mean(input Float64Data) (float64, error) {}
func Median(input Float64Data) (median float64, err error) {}
func MedianAbsoluteDeviation(input Float64Data) (mad float64, err error) {}
func MedianAbsoluteDeviationPopulation(input Float64Data) (mad float64, err error) {}
func Midhinge(input Float64Data) (float64, error) {}
func Min(input Float64Data) (min float64, err error) {}
func MinkowskiDistance(dataPointX, dataPointY Float64Data, lambda float64) (distance float64, err error) {}
func Mode(input Float64Data) (mode []float64, err error) {}
func NormBoxMullerRvs(loc float64, scale float64, size int) []float64 {}
func NormCdf(x float64, loc float64, scale float64) float64 {}
func NormEntropy(loc float64, scale float64) float64 {}
func NormFit(data []float64) [2]float64{}
func NormInterval(alpha float64, loc float64, scale float64 ) [2]float64 {}
func NormIsf(p float64, loc float64, scale float64) (x float64) {}
func NormLogCdf(x float64, loc float64, scale float64) float64 {}
func NormLogPdf(x float64, loc float64, scale float64) float64 {}
func NormLogSf(x float64, loc float64, scale float64) float64 {}
func NormMean(loc float64, scale float64) float64 {}
func NormMedian(loc float64, scale float64) float64 {}
func NormMoment(n int, loc float64, scale float64) float64 {}
func NormPdf(x float64, loc float64, scale float64) float64 {}
func NormPpf(p float64, loc float64, scale float64) (x float64) {}
func NormPpfRvs(loc float64, scale float64, size int) []float64 {}
func NormSf(x float64, loc float64, scale float64) float64 {}
func NormStats(loc float64, scale float64, moments string) []float64 {}
func NormStd(loc float64, scale float64) float64 {}
func NormVar(loc float64, scale float64) float64 {}
func Pearson(data1, data2 Float64Data) (float64, error) {}
func Percentile(input Float64Data, percent float64) (percentile float64, err error) {}
func PercentileNearestRank(input Float64Data, percent float64) (percentile float64, err error) {}
func PopulationVariance(input Float64Data) (pvar float64, err error) {}
func Sample(input Float64Data, takenum int, replacement bool) ([]float64, error) {}
func SampleVariance(input Float64Data) (svar float64, err error) {}
func Sigmoid(input Float64Data) ([]float64, error) {}
func SoftMax(input Float64Data) ([]float64, error) {}
func StableSample(input Float64Data, takenum int) ([]float64, error) {}
func StandardDeviation(input Float64Data) (sdev float64, err error) {}
func StandardDeviationPopulation(input Float64Data) (sdev float64, err error) {}
func StandardDeviationSample(input Float64Data) (sdev float64, err error) {}
func StdDevP(input Float64Data) (sdev float64, err error) {}
func StdDevS(input Float64Data) (sdev float64, err error) {}
func Sum(input Float64Data) (sum float64, err error) {}
func Trimean(input Float64Data) (float64, error) {}
func VarP(input Float64Data) (sdev float64, err error) {}
func VarS(input Float64Data) (sdev float64, err error) {}
func Variance(input Float64Data) (sdev float64, err error) {}
func ProbGeom(a int, b int, p float64) (prob float64, err error) {}
func ExpGeom(p float64) (exp float64, err error) {}
func VarGeom(p float64) (exp float64, err error) {}
type Coordinate struct {
X, Y float64
}
type Series []Coordinate
func ExponentialRegression(s Series) (regressions Series, err error) {}
func LinearRegression(s Series) (regressions Series, err error) {}
func LogarithmicRegression(s Series) (regressions Series, err error) {}
type Outliers struct {
Mild Float64Data
Extreme Float64Data
}
type Quartiles struct {
Q1 float64
Q2 float64
Q3 float64
}
func Quartile(input Float64Data) (Quartiles, error) {}
func QuartileOutliers(input Float64Data) (Outliers, error) {}
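As a quick illustration of a few of these APIs, here's a small sketch based only on the signatures listed above (it is not part of examples/main.go); the input values are arbitrary:

```go
package main

import (
	"fmt"
	"log"

	"github.com/montanaflynn/stats"
)

func main() {
	// Build a Series of (X, Y) Coordinates as defined by the exported types above.
	series := stats.Series{
		{X: 1, Y: 2.3},
		{X: 2, Y: 3.1},
		{X: 3, Y: 4.8},
		{X: 4, Y: 5.9},
	}

	// LinearRegression returns a Series of fitted points, one per input X.
	fitted, err := stats.LinearRegression(series)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(fitted)

	// Quartile and QuartileOutliers work on Float64Data; LoadRawData accepts mixed types.
	data := stats.LoadRawData([]interface{}{1, 2, 3, "4", 5.5, 100})

	quartiles, err := stats.Quartile(data)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(quartiles.Q1, quartiles.Q2, quartiles.Q3)

	outliers, err := stats.QuartileOutliers(data)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(outliers.Mild, outliers.Extreme)
}
```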
Pull requests are always welcome no matter how big or small. I've included a Makefile that has a lot of helper targets for common actions such as linting, testing, code coverage reporting and more.
1. git checkout -b some-thing
2. go test -cover or make test
3. go vet . or make lint
4. git commit -am 'Did something'
5. git push origin some-thing
To make things as seamless as possible please also consider the following steps:
- Update examples/main.go with a simple example of the new feature
- Update the README.md documentation section with any new exported API
- Check code coverage with make coverage
- Squash commits into single units of work with git rebase -i new-feature
This is not required of contributors and is mostly here as a reminder to myself as the maintainer of this repo. To release a new version we should update the CHANGELOG.md and DOCUMENTATION.md.
First install the tools used to generate the markdown files and release:
go install github.com/davecheney/godoc2md@latest
go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest
brew tap git-chglog/git-chglog
brew install gnu-sed hub git-chglog
Then you can run these make directives:
# Generate DOCUMENTATION.md
make docs
Then we can create a CHANGELOG.md, a new git tag and a GitHub release:
make release TAG=v0.x.x
To authenticate hub for the release you will need to create a personal access token and use it as the password when it's requested.
Author: Montanaflynn
Source Code: https://github.com/montanaflynn/stats
License: MIT license
Hello readers! In this blog we'll see how to monitor APIs using API Analytics. First we'll look at Apigee Analytics, and after that we'll see how to monitor APIs with it. So stick with me to the end and see how we can do that.
Let's get started!
Apigee Analytics collects and calculates a wealth of information that flows through API proxies. You can visualize this data in the Apigee UI with predefined analytics dashboards as well as custom reports.
You can also generate security reports through the Apigee UI to identify security threats to your APIs.
Apigee API Analytics collects and analyzes a broad spectrum of data that flows across API proxies, such as response time, request latency, request size, target errors, API product name, developer email address, app name, and transaction revenue.
For a complete listing of data collected by API Analytics, see Analytics metrics, dimensions, and filters reference.
Now let's monitor APIs using API Analytics.
As you can see in the above image, API Analytics covers several areas.
Apigee Analytics includes:
Apigee Analytics dashboards help us see and detect changes in the API ecosystem and visualize analytics data. The predefined analytics dashboards in the Apigee UI are as follows:
Now let’s see these dashboards one by one.
1. API Proxy Performance
This dashboard helps us see API proxy traffic patterns and processing times. It also helps us visualize how much traffic APIs generate and how long it takes for API calls to be processed.
To access the API Proxy Performance Dashboard:
The dashboard opens as shown below:
The above image shows the last 7 days of proxy performance for all the proxies. This dashboard includes these charts:
This dashboard provides several types of interactivity: the list area to the right of each chart is interactive. Select a line in the list to toggle its view in the chart.
NOTE: To know more about this, see the Apigee documentation.
2. Cache Performance:
It helps us see at a glance the value of the Apigee cache. It also helps us visualize the benefit of the cache in terms of lower latency and reduced load on backend servers.
To access the Cache Performance dashboard:
This dashboard measures the following metrics:
- Average cache hit rate
- All cache hits
- Cache hits by app
- Average time with cache
- Average time without cache
- Cache improvement
NOTE: To know more about cache performance, see the Apigee documentation.
3. Error Code Analysis
Error code analysis tells you about error rates for API proxies and targets. The Error Code Analysis dashboard uses:
NOTE: Errors reported on the Error Code Analysis dashboard might be different from errors reported on the API Proxy Performance dashboard, because that dashboard calculates error rates based on the is_error flow variable.
To access the Error Code Analysis dashboard:
The dashboard for Error Code Analysis looks like this:
This dashboard measures:
As you can see in the above image, the Error Code Analysis dashboard helps you see the error composition, proxy errors and proxy errors by response code for all the proxies in your Apigee organization.
NOTE: To know more about error code analysis and its various metrics, see the Apigee documentation.
4. Target Performance
The Target Performance dashboard helps you visualize traffic patterns and performance metrics for API proxy backend targets.
To access the Target Performance dashboard:
The dashboard opens as shown below:
It measures the following fields:
NOTE: To know more about this dashboard, see the Apigee documentation.
5. Latency Analysis
The Latency Analysis dashboard can alert you to any latency issues your API proxies may be experiencing. It displays latency measurements down to the window of a minute, highlighting the median, 95th percentile, and 99th percentile values.
To access the Latency Analysis dashboard:
The dashboard opens as shown below:
This dashboard measures metrics such as response time, target response time, response processing latency and request processing latency.
NOTE: To know more, see the Apigee documentation.
6. Geo Map
The Geo Map dashboard tracks traffic patterns, error patterns, and quality of service across geographical locations.
To access the Geo Map dashboard:
This dashboard helps you assess:
The dashboard opens as shown below:
This dashboard measures traffic, error count, average response time and average target response time.
NOTE: See the Apigee documentation to know more about geo mapping.
7. Devices Dashboard
The Devices dashboard tells you about the devices and servers that are being used to access your APIs.
To access the Devices dashboard:
The dashboard opens as shown below:
This dashboard measures the following metrics:
NOTE: To know more, see the Apigee documentation.
8. Traffic Composition
The Traffic Composition dashboard measures the relative contribution of the top APIs, apps, developers, and products to your overall API program.
To access the Traffic Composition dashboard:
The dashboard opens as shown below:
This dashboard measures the following metrics:
NOTE: See the Apigee documentation to know more about Traffic Composition.
To access the custom reports page using the Apigee UI:
The custom reports page displays all custom reports that have been created for your organisation, as shown in the following figure:
The custom reports page enables you to:
NOTE: You can generate custom reports for the metrics listed in the analytics reference: https://cloud.google.com/apigee/docs/api-platform/analytics/analytics-reference
Some examples of custom reports for the above metrics are as follows:
Average Transactions Per Second
This means API proxy requests per second. A custom report for average transactions per second looks like this:
Cache Hit
This means the number of successful API requests that use the ResponseCache instead of the response from the target service. A custom report for cache hits looks like this:
Traffic by Proxy
This means the total number of API calls processed by Apigee in the specified time period. The traffic-by-proxy custom report generates data as shown:
API Error Report
This means the total number of times API proxies failed over the specified time period. The report shows data as follows:
Response Processing Latency
This means the amount of time, in milliseconds, that it takes Apigee to process API responses. Its report looks like this:
Policy Errors
This means the total number of policy errors over the specified time period. A custom report checking the occurrence of policy errors in API calls displays as follows:
NOTE: We can create custom reports for other metrics to see the exact data we want for monitoring our proxies.
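Beyond the dashboards and custom reports in the UI, the same analytics data can also be pulled programmatically. Below is a minimal sketch in Go (standard library only) that queries what I assume to be the Apigee management stats endpoint for message and error counts per proxy; the organization, environment, time range, and the exact endpoint shape and query parameters are assumptions to verify against the analytics reference linked above.

```go
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
	"net/url"
	"os"
)

func main() {
	// Placeholders — substitute your own organization and environment.
	org := "my-org"
	env := "prod"
	token := os.Getenv("APIGEE_TOKEN") // OAuth2 access token, e.g. from `gcloud auth print-access-token`

	// Assumed endpoint shape for the Apigee stats API; confirm against the analytics reference.
	base := fmt.Sprintf("https://apigee.googleapis.com/v1/organizations/%s/environments/%s/stats/apiproxy", org, env)

	q := url.Values{}
	q.Set("select", "sum(message_count),sum(is_error)")
	q.Set("timeRange", "01/01/2023 00:00~01/07/2023 23:59")
	q.Set("timeUnit", "day")

	req, err := http.NewRequest(http.MethodGet, base+"?"+q.Encode(), nil)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Authorization", "Bearer "+token)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status)
	fmt.Println(string(body)) // raw JSON with traffic and error counts per proxy
}
```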
So, in this blog we have seen how we can monitor APIs using API Analytics. I hope this blog was helpful and that you have learned something new about monitoring API performance.
Thank You !!!
Happy Learning !!!
Original article source at: https://blog.knoldus.com/
Video Analytics enables a rapidly growing number of embedded video products such as smart cameras and intelligent digital video recorders with automated capabilities that would have required human monitoring just a few years ago. Broadly, it is the production of meaningful and relevant information from digital video. As opposed to video compression, which attempts to exploit the redundancy in digital video to reduce the size, analytics is concerned with understanding the video's content.
Its solutions build upon research in computer vision, pattern analysis, and machine intelligence and span several industry segments, including surveillance analytics solutions, retail analytics solutions, and transportation analytics solutions. It is also called video content analysis (VCA) or intelligent video. Artificial intelligence (AI) and machine learning (ML) are emerging as key factors in generating value from it. AI for video analytics is breaking through in production units, supply chains, workplaces, and retail safety to automate and support analytical operations.
With large-scale experience in deploying AI development services for video analytics and development in IoT devices and applications, channeling data generation has become relatively straightforward. Artificial intelligence and deep learning technologies open better opportunities for businesses to drive value without direct programming. These models can be trained on a high volume of video footage to automatically identify, classify, tag, and label specific objects.
Listed below are the Use-Cases.
Described below are the brief use-cases of video analytics in the workplace.
A facial recognition system is a technology capable of matching a human face from a digital image or a video frame against a database of faces. It is typically employed to verify users through ID verification services and works by detecting and measuring facial features from a given image. It is a method for identifying an unauthorized or unknown person, or verifying a specific person's identity from their face. It is a branch of computer vision, but face recognition is specialized and comes with social baggage for some applications. For face recognition, the algorithm notes specific essential measurements on the face, such as the color, size and slant of the eyes, the gap between the eyebrows, etc. All of these put together define a face encoding, the information obtained from the image that is used to identify the particular face. This allows tracking and managing authorized and unauthorized persons: the system detects whether a person is permitted, and when it sees that a person is unknown, an alert is generated.
Behavior detection is a method of detecting a person's behavior towards the property of the building, organization, etc. Here, the system monitors each person's behavior and generates an alert if they seem likely to harm another person or the building's infrastructure. Through this, the workplace can detect how people in the building behave, including those who come from outside for interviews or other purposes. We can detect whether their behavior in the workplace is harmful through our detection system. If any action is wrong, the system will alert the person and the building authority.
Person tracking is an essential domain in computer vision. It involves tracking a person across a series of frames. For people tracking, we would start with all possible detections in a frame and give each one an ID. In subsequent frames, we try to carry forward a person's ID. If the person has moved out of the frame, then that ID is dropped. If a new person appears, they start with a fresh ID. Another use of tracking is for outsiders in the workplace: by assigning an ID when a person arrives, we can track them through the building by their ID and see where they are and what they are doing.
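To make the idea concrete, here is a toy sketch (not production tracking code) of the ID-assignment logic described above: detections in a new frame are matched to existing tracks by nearest distance, unmatched detections get fresh IDs, and tracks that go unmatched for too long are dropped. The distance threshold and data structures are illustrative assumptions.

```go
package main

import (
	"fmt"
	"math"
)

// Detection is the center point of a detected person in one frame.
type Detection struct{ X, Y float64 }

// Track carries a persistent ID plus the last seen position and a miss counter.
type Track struct {
	ID     int
	Last   Detection
	Misses int
}

// Tracker implements the naive ID-assignment logic described above.
type Tracker struct {
	nextID    int
	tracks    []*Track
	maxDist   float64 // assumed matching threshold (pixels)
	maxMisses int     // frames a track may go unmatched before it is dropped
}

func NewTracker() *Tracker { return &Tracker{maxDist: 50, maxMisses: 10} }

// Update matches detections to existing tracks by nearest neighbour,
// gives unmatched detections fresh IDs and drops stale tracks.
func (t *Tracker) Update(dets []Detection) {
	used := make([]bool, len(dets))
	for _, tr := range t.tracks {
		best, bestDist := -1, t.maxDist
		for i, d := range dets {
			if used[i] {
				continue
			}
			if dist := math.Hypot(d.X-tr.Last.X, d.Y-tr.Last.Y); dist < bestDist {
				best, bestDist = i, dist
			}
		}
		if best >= 0 {
			tr.Last, tr.Misses = dets[best], 0
			used[best] = true
		} else {
			tr.Misses++ // the person may have left the frame
		}
	}
	for i, d := range dets {
		if !used[i] { // a new person appears: start a fresh ID
			t.nextID++
			t.tracks = append(t.tracks, &Track{ID: t.nextID, Last: d})
		}
	}
	// Drop tracks that have been missing for too long.
	alive := t.tracks[:0]
	for _, tr := range t.tracks {
		if tr.Misses <= t.maxMisses {
			alive = append(alive, tr)
		}
	}
	t.tracks = alive
}

func main() {
	tk := NewTracker()
	tk.Update([]Detection{{100, 200}, {400, 300}}) // frame 1: two people detected
	tk.Update([]Detection{{110, 205}})             // frame 2: one moved slightly, one unmatched
	for _, tr := range tk.tracks {
		fmt.Printf("person %d last seen at (%.0f, %.0f)\n", tr.ID, tr.Last.X, tr.Last.Y)
	}
}
```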
Crowd analysis/detection and scene understanding have drawn a lot of attention recently because they have a broad range of video surveillance applications. Besides surveillance, crowd scenes also exist in movies, TV shows, personal video collections, and even videos shared through social media. Since crowd scenes have many people packed together with frequent and heavy occlusion, many existing detection, tracking, and activity recognition technologies, which are only applicable to sparse settings, do not work well in crowded locations.
With people presence or people counting, the system analyzes each person's attendance and tracks people in the building, providing information about what time a person arrived. You can also detect the number of people in the building, and the workplace gets the details of each person in the building.
It allows analyzing the presence and time management of each person in a building. Each person has a work period, i.e., shift timing, through which the system has information about the person's activity and record. When a person exceeds their allotted time, it sends alerts to the system and to the respective person. It will also track the number of authorized/unauthorized persons exceeding their time in the building.
Recognizing an object or person in images of a restricted area requires reliable ways of detecting fragments of object boundaries, a complex problem in itself. The system tracks restricted zone entry, such as how many people visit the restricted areas daily, weekly or monthly, and at what time they visit the restricted area. Boundary box detection in the building tracks people in the restricted zone, through which you can analyze the details of anyone who enters the restricted area. The system sends alerts when a person has entered the restricted area, and you can see the bounding box drawn around the region and the person.
To solve these problems, XenonStack came up with a video analytics solution to make workplaces productive and safe using AI. The system detects a person and, based on appearance, recognizes visitors or regular employees. It then handles access management, behavior analysis, anomaly detection, and alert management to make the system more reliable. It analyzes crowds and presence using self-learning analytics. Besides that, it monitors suspicious activities and sends an alert to the respective authorities if any are encountered. Thus the system monitors the whole process. The solution covers the following scenarios to solve the problem:
Today, machines can automatically verify identity information for secure transactions, surveillance and security tasks, access control to buildings, etc. These applications usually work in controlled environments, and recognition algorithms can take advantage of the environmental constraints to obtain high recognition accuracy. However, next-generation face recognition systems will have widespread applications in intelligent environments where computers and machines are more like helpful assistants.
Original article source at: https://www.xenonstack.com/
Confused by online transaction processing (OLTP) and online analytical processing (OLAP) in the world of databases? Fear not. We have a simple explanation right here.
To understand the differences between OLTP and OLAP, we first need to understand where they fit into the world of data and databases. And the answer is “data warehousing”.
A data warehouse is a system used for reporting and data analysis. They are central repositories of data from one or more disparate sources, including relational databases.
In online transaction processing (OLTP), tables store individual details like orders, customer information, shipping address, etc. When you access the data, typically you are filtering out the vast majority of records to focus only on those records that concern you.
In online analytical processing (OLAP), you are typically creating a report across various tables from numerous sources. OLAP may scan many tables and aggregate data across a huge number of records. With OLTP, we expect a response in a fraction of a second; with OLAP, getting the response may require much more time.
An OLTP system captures transaction data that it stores in an OLTP database. Each transaction may involve several database records with multiple fields. OLTP focuses on processing quickly and delivering immediate responses. Tables in OLTP databases are updated frequently. If a transaction fails, the system logic that is built into OLTP must ensure data integrity. OLTP data needs to be atomic, consistent, isolated, and durable (ACID). This ensures that each transaction will still be valid if unexpected errors occur. Good OLTP database design keeps redundant and duplicate data to a minimum.
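To make the ACID point concrete, here is a minimal sketch of OLTP-style transactional logic in Go, where an order and its line items are committed together or not at all. The table names, columns, and the Postgres driver are assumptions for the example, not part of the article.

```go
package main

import (
	"context"
	"database/sql"
	"log"

	_ "github.com/lib/pq" // any database/sql driver works; Postgres is assumed here
)

// placeOrder inserts an order and one order item inside a single transaction:
// either both rows are committed, or neither is (atomicity).
func placeOrder(ctx context.Context, db *sql.DB, customerID, productID, qty int) error {
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	defer tx.Rollback() // no-op if the transaction was already committed

	var orderID int
	err = tx.QueryRowContext(ctx,
		`INSERT INTO orders (customer_id) VALUES ($1) RETURNING order_id`, customerID).Scan(&orderID)
	if err != nil {
		return err // rollback: no half-written order survives
	}

	if _, err := tx.ExecContext(ctx,
		`INSERT INTO order_items (order_id, product_id, quantity) VALUES ($1, $2, $3)`,
		orderID, productID, qty); err != nil {
		return err
	}

	return tx.Commit()
}

func main() {
	db, err := sql.Open("postgres", "postgres://user:pass@localhost/shop?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := placeOrder(context.Background(), db, 1, 42, 2); err != nil {
		log.Fatal(err)
	}
}
```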
OLAP is designed for quick, efficient reporting and the analysis of large quantities of data. It uses complex queries on very large amounts of historical data. This data has been aggregated from various sources that likely include OLTP databases. OLAP is used for analytics, reporting, data mining, and Business Intelligence. OLAP queries should have low response times. Query failure in OLAP will not affect transaction processing, but it may delay or reduce the accuracy of the information extracted from the OLAP database.
Extract-Transform-Load (ETL) is often used to extract data from OLTP sources, transform it, and load it into an OLAP system.
If you'd like more details about data warehouse terminology and design, see our articles "OLAP for OLTP Practitioners" and "What You Need to Know About Data Warehouses".
A simple example of a normalized transactional database model is the link between an order and the products included in that order. One table stores all product data, one table stores order data, and a third table links each order to the items (products) that it contains.
In such a model, there is no duplication of data from the Product table into the Order table. We can create several products in the product table, then create orders that contain different combinations of products. The tables required for these transactions would look something like this:
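Since the original diagram isn't reproduced here, a rough sketch of those three tables as record types might look like the following; the field names are illustrative assumptions, with the link table holding only foreign keys and a quantity:

```go
// A hypothetical, minimal representation of the normalized OLTP model
// described above: products, orders, and a link table joining them.
package model

import "time"

type Product struct {
	ProductID int // primary key
	Name      string
	Price     float64
}

type Order struct {
	OrderID    int // primary key
	CustomerID int
	OrderedAt  time.Time
}

// OrderItem links one order to one product; product data is not duplicated
// into the order — only the foreign keys and the quantity are stored.
type OrderItem struct {
	OrderID   int // foreign key -> Order
	ProductID int // foreign key -> Product
	Quantity  int
}
```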
In contrast, data warehouses generally have a star schema design that is based on fact and dimension tables; the star schema is the simplest model used in OLAP.
Fact tables contain reporting data that has been aggregated from related dimension tables. Their columns store values and reference dimension tables via foreign keys. When designing a fact table, you should keep in mind the concept of sparsity.
Dimension tables describe the data that is stored. Each dimension table has a category, like orders, employees, or stores. Data warehouses use denormalized data; in other words, they have redundant data for the sake of performance.
Here's an example of a star schema with fact and dimension tables:
The good news is that Vertabelo can help you create a database model for both OLTP and OLAP. As you see above, with Vertabelo, you can create models from simple to complex for both transaction processing and analytics.
The answer to that question will depend on the usage that will be made of the data. What are you going to do with the information stored in the database? What’s its business purpose?
To put it in technical terms, is the application oriented to online transaction processing (OLTP) or online analytical processing (OLAP)? Your data model should be normalized for OLTP and denormalized for OLAP. That being said, you should only denormalize when you have a reason to do so. Denormalization leads to redundant data that will be difficult to maintain.
Online transactional processing (OLTP) is used for CRUD (Create, Read, Update, Delete) applications, while online analytical processing (OLAP) is used for reporting and Business Intelligence. The main difference between the underlying transactional and analytical databases is the nature of the application using the data.
A transactional database is designed for an application in which the user is more interested in CRUD, i.e. creating, reading, updating, and deleting records. An analytical database is designed for an application in which the user is more interested in analysis, reporting, and forecasting. In OLAP, the focus is to store data for analysis with no user maintenance of data. Thus, analytical databases rarely have inserts and updates. The goal of an analytical database is to analyze data quickly. When designing a data model, consider the usage of the data.
Vertabelo won't help you decide which type of database model you need, but once you have figured that out, the database modeler can help you create both OLTP and OLAP models. If you are not sure how, check out other articles related to OLTP (there are tons of articles on this, but you might start with "OLAP for OLTP Practitioners") and OLAP: fact tables, dimension tables, star schemas, snowflake schemas, and data warehousing, among other things.
First off, there is not a single type of database model that you should always use. As mentioned above, one main distinction is OLTP vs. OLAP; the one you choose will depend on how your application uses the data in your database.
Criteria | OLTP | OLAP |
---|---|---|
Purpose | Process transactions quickly. | Business intelligence or reporting. |
Characteristic | Handles a large number of small transactions. | Handles large volumes of data. |
Query type | Optimized for all kinds of simple standard queries, typically filtering for a few records. | Optimized for complex queries that aggregate multiple fact and dimension tables. |
SQL Operations | INSERT, UPDATE, DELETE statements to update data and SELECT to read it. | SELECT statements to aggregate data. |
Update | Short, fast updates are initiated by the user. Processes all kinds of frequently-occurring updates (insert, update, and delete). | Data is periodically refreshed via scheduled, long-running batch jobs. Optimized for reading data, typically run on a less frequent basis than OLTP. |
Performance | Queries should execute for immediate response to requests, i.e. in milliseconds. | Queries execute in seconds, minutes, or hours, depending on the amount of data to process. |
Data model type | Normalized models, like BCNF, with many tables. | Denormalized model, usually with fewer tables and often based on a star schema, snowflake schema, or similar. |
Design | Industry-specific, such as retail, manufacturing, or banking. | Subject-specific, such as sales, inventory, or marketing. |
Data quality | Efforts to ensure ACID compliance. | Data may not be organized: what matters is the ability to manage the dimensions of the data. |
Space requirements | Space will depend on the number of transactions to be processed and the length of online storage. Generally smaller than OLAP if historical data is archived. | Generally large, due to the aggregation of large datasets. |
Availability | Generally, 24x7x365 is essential when transactions are performed every second of every day. | Interactions are less frequent; the absence of an OLAP system should not impact operations. |
Use Case Examples | Operational: Applications used concurrently by many users, such as order entry, financial transactions, customer relationship management, and retail sales. Examples are online ticket bookings, banking, e-commerce websites, fintech, and other businesses where there are thousands or millions of transactions per day. | Informational: Trend analysis and data patterns, predicting risks and outcomes, generating reports, and tracking customer behavior and buying patterns. Examples include creating sales and marketing reports, preparing forecasts, and business process management. |
In short, OLTP provides a record of transactional activity; OLAP provides insights from that data over time.
Both online transaction and analytic processing are essential parts of business data management. OLTP and OLAP are complementary to each other, as analytics can only be carried out based on effective OLTP systems. Based on the analytics requirements, an OLTP data model may need to be designed differently or re-designed to support changes in trends.
Original article source at: https://www.vertabelo.com
Geospatial analytics relates to data used for locating anything on the globe; from an Uber driver to a person in a new neighbourhood, everybody uses this data in some way or another. The technology involves GPS (global positioning systems), GIS (geographical information systems), and RS (remote sensing). In this blog we will explore the topic in depth, starting with the basics and then diving into the details.
Geospatial data is necessary for many things and is used daily for various reasons, from an ordinary person's commute to the data in the missiles of a country's defence organization. It is extracted from various resources: every phone with an active internet connection contributes to geospatial data in some way, and satellites collect data daily. It is of great use in everyday life, and so it requires a significant amount of attention. It can be used for many purposes: to help respond to natural hazards and disasters, to track global climate change, wildlife, natural resources, and more. It is also used for satellite imagery, whether for tactical or weather-forecasting purposes. Many tech giants like Uber use it daily to ease everyday life. A company has to extract and use this data efficiently to stand out in the market.
Various methods can do this, but mainly Presto and Hive are used to extract and reshape data that runs to hundreds of petabytes and to use it efficiently, making the lives of billions easier. This data is vital as it touches the vast majority of people and is used every second. GIS is the part that helps in the collection, storage, manipulation, analysis, and presentation of spatial data. Whatever the situation at the local, regional or national level, whenever "where" is asked, GIS comes into play. And it wouldn't be effective without visualization.
Presto is an open-source distributed SQL query engine, used to answer queries of any size or type. It can query data in Hadoop as well as many non-relational sources and Teradata. It queries data in its respective location, without moving the actual data to any separate system. Query execution runs in parallel over a pure memory-based architecture, with most results returning within seconds. Many tech giants use it, and it's a popular choice for interactive queries over data ranging into hundreds of petabytes.
Hive is a data warehouse infrastructure tool for processing structured data, developed on top of the Hadoop Distributed File System (HDFS). It resides on top of Hadoop to summarize big data and makes querying and analyzing any kind of data accessible.
Hive is an ETL and data warehousing tool built on top of Hadoop. It helps perform many operations securely, such as:
It supports applications written in languages like Java, Python, C++, etc., using Thrift, JDBC and ODBC drivers. It's easy to write a Hive client application in the desired language. Its clients are categorized into three types:
It provides various services, such as:
There are two central parts in Presto: the Coordinator and the Workers. Presto is an open-source distributed system that can be run on multiple machines. Its distributed SQL query engine was built for fast analytic queries. A deployment includes one Coordinator and any number of Workers.
The key components of presto are:
Coordinator: It is the brain of any installation; it manages all the worker nodes for all the work related to queries. It gets results from the workers and returns the final output to the client. It connects with workers and clients via REST.
Worker: It helps execute tasks and process the data. Worker nodes share data amongst each other and get work from the Coordinator.
Catalog: It contains information related to the data, such as where the data is located, where the schema is located, and the data source.
Tables and schemas: These are similar to what they mean in a relational database. A table is a set of rows organized into named columns, and a schema is what you use to hold your tables.
Connector: It is used to help Presto integrate with an external data source.
To execute a query, Presto breaks it up into stages.
Stages are implemented as a series of tasks that might get distributed over the Workers.
Tasks contain one or more parallel drivers, which are pipelines of operators in memory. An operator consumes, transforms and produces data.
The deployment strategies for Hive are listed below:
Amazon EMR can be used to deploy the Hive metastore. Users can opt for one of three configurations that Amazon has to offer, namely embedded, local or remote. There are two options for creating an external Hive metastore for EMR:
Apache Hive on Cloud Dataproc provides an efficient and flexible way of storing data in Cloud Storage and hosting the Hive metastore in a MySQL database on Cloud SQL. It offers advantages like flexibility and agility by letting users tailor the cluster configuration for specific workloads and scale the cluster according to need. It also helps in saving cost.
The deployment strategies for Presto are listed below:
Amazon EMR allows you to quickly spin up a managed EMR cluster with the Presto query engine and run interactive analysis on data stored in Amazon S3. It is used to run interactive queries. Its implementation can be built in the cloud on Amazon Web Services; Amazon EMR and Amazon Athena provide ways to build and implement it.
A cluster that includes the Presto component can also be prepared easily.
The various ways to optimise are described below:
The advantages of Hive and Presto are:
Modelling geospatial data has quite a few complexities. Well-Known Text (WKT) is used to model different locations on the map, with types like points and polygons used for these purposes. A spatial library is used for spatial processing in Hive, with user-defined functions and SerDes. By enabling this library in Hive, queries can be written using Hive Query Language (HQL), which is fairly close to SQL; you can therefore avoid complex MapReduce algorithms and stick to a more familiar workflow. This plugin is running in production at Uber: more than 90% of all geospatial traffic at Uber is completed within 5 minutes. Compared with brute-force MapReduce execution, Uber's geospatial plugin is more than 50x faster, leading to greater efficiency.
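As an illustration, a client application can also run geospatial queries against Presto directly. The sketch below uses the community Presto Go driver and Presto's built-in ST_* geospatial functions; the table name, columns, WKT polygon, and connection string are assumptions for the example.

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	// Community Presto driver for database/sql (registers the "presto" driver name).
	_ "github.com/prestodb/presto-go-client/presto"
)

func main() {
	// Assumed DSN: Presto coordinator on localhost, querying the Hive catalog.
	db, err := sql.Open("presto", "http://analyst@localhost:8080?catalog=hive&schema=default")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Count trips whose pickup point falls inside a polygon given as Well-Known Text.
	// The "trips" table and its columns are hypothetical; the ST_* functions are
	// Presto's built-in geospatial functions.
	query := `
		SELECT count(*)
		FROM trips
		WHERE ST_Contains(
			ST_GeometryFromText('POLYGON ((77.5 12.9, 77.7 12.9, 77.7 13.1, 77.5 13.1, 77.5 12.9))'),
			ST_Point(pickup_lon, pickup_lat)
		)`

	var trips int64
	if err := db.QueryRow(query).Scan(&trips); err != nil {
		log.Fatal(err)
	}
	fmt.Println("trips inside polygon:", trips)
}
```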
Presto has an edge over Hive as it can be used to process unstructured data too, and query processing in Presto is faster than in Hive. Data is collected in humongous amounts daily, and it needs to be extracted efficiently and judiciously to build better software that relies on it.
Original article source at: https://www.xenonstack.com/
You may be surprised by the fact that we now produce more data in two days than we did across decades of earlier history. Yes, that's true, and most of us do not even realize that we produce so much data just by browsing the Internet. If you don't want future technologies to catch you off guard, pay attention to these current trends in big data analytics and succeed!
Traditionally, data is stored in data stores developed to be accessed by particular applications. When SaaS (software as a service) became popular, DaaS was just beginning. As with Software-as-a-Service applications, Data as a Service uses cloud technology to give users and applications on-demand access to information regardless of where the users or applications are. Data as a Service is one of the current trends in big data analytics; it will make it simpler for analysts to obtain data for business review tasks and easier for areas throughout a business or industry to share data.
Responsible and Scalable AI will enable better learning algorithms with shorter time to market. Businesses will achieve a lot more from AI systems like formulating processes that can function efficiently. Businesses will find a way to take AI to scale, which has been a great challenge till now.
Big data analytics has always been a fundamental way for companies to gain a competitive edge and accomplish their aims. They apply basic analytics tools to prepare data and discover the causes of specific issues. Predictive methods are applied to examine current data and historical events to understand customers and recognize possible hazards and events for a corporation. Predictive analysis in big data can predict what may occur in the future. This strategy is extremely effective at using analyzed, assembled data to predict customer response, enabling organizations to define the steps they have to take by identifying a customer's next move before they even make it.
Current technology can take a lot of time to process a huge amount of data. Quantum computers, by contrast, calculate the probability of an object's state or an event before it is measured, which means they can process more data than classical computers. If we could crunch billions of data points at once in only a few minutes, we could reduce processing time immensely, giving organizations the possibility to make timely decisions and attain better outcomes. Applying quantum computing to functional and analytical research across several enterprises could make those industries more precise.
Edge processing means running processes on a local system, such as a user's device, an IoT device or a local server, rather than in a central data center. Edge computing brings computation to a network's edge and reduces the amount of long-distance communication that has to happen between a client and a server, which is making it one of the latest trends in big data analytics. It provides a boost to data streaming, including real-time streaming and processing without introducing latency, and it enables devices to respond immediately. Edge computing is an efficient way to process massive data while consuming less bandwidth. It can reduce development costs for an organization and help software run in remote locations.
Natural Language Processing (NLP) lies within artificial intelligence and works to develop communication between computers and humans. The objective of NLP is to read and decode the meaning of human language. Natural language processing is mostly based on machine learning and is used to develop word processor applications or translation software. Natural language processing techniques need algorithms to recognize and obtain the required data from each sentence by applying grammar rules. Syntactic analysis and semantic analysis are the main techniques used in natural language processing: syntactic analysis handles sentences and grammatical issues, whereas semantic analysis handles the meaning of the data/text.
A hybrid cloud computing system uses an on-premises private cloud and a third-party public cloud with orchestration between the two. A hybrid cloud provides excellent flexibility and more data deployment options by moving processes between private and public clouds. An organization must have a private cloud to gain adaptability with its chosen public cloud. For that, it has to develop a data center, including servers, storage, a LAN, and a load balancer. The organization has to deploy a virtualization layer/hypervisor to support VMs and containers, and install a private cloud software layer. This software layer allows instances to transfer data between the private and public clouds.
Dark data is data that a company does not use in any analytical system. It is gathered from several network operations but is not used to derive insights or for prediction. Organizations might think this is not useful data because they get no outcome from it, but it may turn out to be highly valuable. As data grows day by day, the industry should understand that any unexplored data can also be a security risk. The expansion in the amount of dark data can be seen as another trend.
Data fabric is an architecture and collection of data services that provide consistent functionality across a variety of endpoints, both on-premises and in cloud environments. To drive digital transformation, data fabric simplifies and integrates data storage across cloud and on-premises environments. It enables access and sharing of data in a distributed data environment, and it additionally provides a consistent data management framework across un-siloed storage.
The aim of XOps (data, ML, model, platform) is to achieve efficiencies and economies of scale. XOps is achieved by implementing DevOps best practices, thus ensuring efficiency, reusability, and repeatability while reducing technology and process duplication and enabling automation. These innovations enable prototypes to be scaled, with flexible design and agile orchestration of governed systems.
Over the years, new technologies in big data analytics have kept changing. Therefore, businesses need to adopt the right trends to stay ahead of their competitors. The trends above are the latest in big data analytics for 2022 and beyond.
Original article source at: https://www.xenonstack.com/
Sensors Data, a product of Sensors Network Technology (Beijing) Co., Ltd., is a professional big data analytics service company and a pioneer in the big data analytics industry. It provides customers with an in-depth user behavior analytics platform as well as professional consulting services and industry solutions, and is dedicated to helping customers become data-driven. Sensors Data stands at the forefront of big data and user behavior analytics technology and practice. Its business now covers more than ten major industries, including the internet, finance, retail and FMCG, high tech, and manufacturing, and it can support multiple functional departments within an enterprise. The company is headquartered in Beijing, with local service teams in Shanghai, Shenzhen, Hefei, Wuhan, and other cities covering the eastern and southern markets, and a professional team providing one-on-one customer service. Its core technologies in the big data field cover massive data collection, storage, cleaning, analysis and mining, visualization, intelligent applications, and security and privacy protection.
The Sensors Analytics sensors_analytics_flutter_plugin plugin wraps the commonly used APIs of the Sensors Analytics iOS & Android SDKs. With this plugin you can implement event tracking and reporting.
Add the sensors_analytics_flutter_plugin dependency to the pubspec.yaml file of your Flutter project:
dependencies:
# Add the Sensors Analytics Flutter plugin
sensors_analytics_flutter_plugin: ^2.2.2
Run the following command to install the plugin:
flutter pub get
Please refer to the Flutter plugin integration documentation on the Sensors Data official website.
Scan the QR codes to join the Sensors Data open source community QQ group (group number: 785122381) or the Sensors Data open source community WeChat group, and to follow the Sensors Data open source community account and service account.
Copyright 2015-2022 Sensors Data Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
Run this command:
With Flutter:
$ flutter pub add sensors_analytics_flutter_plugin
This will add a line like this to your package's pubspec.yaml (and run an implicit flutter pub get):
dependencies:
sensors_analytics_flutter_plugin: ^2.2.2
Alternatively, your editor might support flutter pub get. Check the docs for your editor to learn more.
Now in your Dart code, you can use:
import 'package:sensors_analytics_flutter_plugin/sensors_analytics_flutter_plugin.dart';
import 'dart:async';
import 'package:flutter/material.dart';
import 'package:flutter/services.dart';
import 'package:sensors_analytics_flutter_plugin/sensors_analytics_flutter_plugin.dart';
void main() => runApp(MyApp());
class MyApp extends StatefulWidget {
@override
_MyAppState createState() => _MyAppState();
}
class _MyAppState extends State<MyApp> {
String? _distinctId = '';
var parameters;
@override
void initState() {
super.initState();
SensorsAnalyticsFlutterPlugin.init(
serverUrl: "https://sdkdebugtest.datasink.sensorsdata.cn/sa?project=default&token=cfb8b60e42e0ae9b",
autoTrackTypes: <SAAutoTrackType>{
SAAutoTrackType.APP_START,
SAAutoTrackType.APP_VIEW_SCREEN,
SAAutoTrackType.APP_CLICK,
SAAutoTrackType.APP_END
},
networkTypes: <SANetworkType>{
SANetworkType.TYPE_2G,
SANetworkType.TYPE_3G,
SANetworkType.TYPE_4G,
SANetworkType.TYPE_WIFI,
SANetworkType.TYPE_5G
},
flushInterval: 30000,
flushBulkSize: 150,
enableLog: true,
javaScriptBridge: true,
encrypt: true,
heatMap: true,
visualized: VisualizedConfig(autoTrack: true, properties: true),
android: AndroidConfig(maxCacheSize: 48 * 1024 * 1024, jellybean: true, subProcessFlush: true),
ios: IOSConfig(maxCacheSize: 10000),
globalProperties: {'aaa': 'aaa-value', 'bbb': 'bbb-value'});
initPlatformState();
}
// Platform messages are asynchronous, so we initialize in an async method.
Future<void> initPlatformState() async {
String? distinctId = "";
// Platform messages may fail, so we use a try/catch PlatformException.
try {
distinctId = await SensorsAnalyticsFlutterPlugin.getDistinctId;
} on PlatformException {
distinctId = 'Failed to get distinctId.';
}
// If the widget was removed from the tree while the asynchronous platform
// message was in flight, we want to discard the reply rather than calling
// setState to update our non-existent appearance.
if (!mounted) return;
setState(() {
_distinctId = distinctId;
});
}
@override
Widget build(BuildContext context) {
late dynamic tmpResult;
return MaterialApp(
home: Scaffold(
appBar: AppBar(
title: Text('Flutter Plugin for Sensors Analytics.'),
),
body: ListView(
children: <Widget>[
ListTile(
title: Text(_distinctId ?? ""),
onTap: () {},
),
ListTile(
title: Text('This is the official Flutter Plugin for Sensors Analytics.'),
onTap: () {},
),
ListTile(
leading: Icon(Icons.account_circle),
title: Text('Call login on successful registration/login'),
onTap: () {
SensorsAnalyticsFlutterPlugin.login("flutter_lgoin_test123654", {"hello": "world"});
},
),
ListTile(
leading: Icon(Icons.event),
title: Text('Trigger activation event trackInstallation'),
onTap: () {
SensorsAnalyticsFlutterPlugin.trackInstallation(
'AppInstall', <String, dynamic>{"a_time": DateTime.now(), "product_name": "Apple 12 max pro"});
},
),
ListTile(
leading: Icon(Icons.event),
title: Text('Track event: track'),
onTap: () {
print("======触发事件233");
dynamic a = "aaa";
print(a is String);
String? b = "bbb";
dynamic c = b;
print(c.runtimeType);
print(c is String);
print(c is! String);
print(c is String?);
print("======");
dynamic d = null;
print(d.runtimeType);
print(d is String);
print(d is! String);
print(d is String?);
// SensorsAnalyticsFlutterPlugin.track(
// 'ViewProduct', <String, dynamic>{
// "a_time": DateTime.now(),
// "product_name": "Apple 12 max pro"
// });
var map = { "address": "beijing"};
SensorsAnalyticsFlutterPlugin.track("hello", map);
},
),
ListTile(
leading: Icon(Icons.assessment),
title: Text('Set user properties: profileSet'),
onTap: () {
SensorsAnalyticsFlutterPlugin.profileSet({'Age': 18, 'Sex': 'Male', "a_time": DateTime.now()});
},
),
ListTile(
leading: Icon(Icons.assessment),
title: Text('Set user push ID to the user profile'),
onTap: () {
SensorsAnalyticsFlutterPlugin.profilePushId("jgId", "12312312312");
},
),
ListTile(
leading: Icon(Icons.assessment),
title: Text('Delete the pushId set for the user'),
onTap: () {
SensorsAnalyticsFlutterPlugin.profileUnsetPushId("jgId");
},
),
ListTile(
title: Text('https://github.com/sensorsdata/sensors_analytics_flutter_plugin'),
onTap: () {},
),
ListTile(
title: Text('set server url'),
onTap: () async {
SensorsAnalyticsFlutterPlugin.setServerUrl(
"https://newsdktest.datasink.sensorsdata.cn/sa?project=zhujiagui&token=5a394d2405c147ca", true);
},
),
ListTile(
title: Text('getPresetProperties'),
onTap: () async {
dynamic map = await SensorsAnalyticsFlutterPlugin.getPresetProperties();
print("getPresetProperties===$map");
},
),
ListTile(
title: Text('enableLog'),
onTap: () async {
SensorsAnalyticsFlutterPlugin.enableLog(false);
print("enableLog==333=");
},
),
ListTile(
title: Text('setFlushNetworkPolicy'),
onTap: () async {
SensorsAnalyticsFlutterPlugin.setFlushNetworkPolicy(<SANetworkType>{SANetworkType.TYPE_WIFI});
print("setFlushNetworkPolicy===");
},
),
ListTile(
title: Text('setFlushInterval'),
onTap: () async {
SensorsAnalyticsFlutterPlugin.setFlushInterval(60 * 1000);
print("setFlushInterval===");
},
),
ListTile(
title: Text('getFlushInterval'),
onTap: () async {
dynamic result = await SensorsAnalyticsFlutterPlugin.getFlushInterval();
print("getFlushInterval===$result");
},
),
ListTile(
title: Text('setFlushBulkSize'),
onTap: () async {
SensorsAnalyticsFlutterPlugin.setFlushInterval(60 * 60 * 1000);
SensorsAnalyticsFlutterPlugin.setFlushBulkSize(100);
print("setFlushBulkSize===");
dynamic result = await SensorsAnalyticsFlutterPlugin.getFlushBulkSize();
print("getFlushBulkSize===$result");
for (int index = 0; index <= 100; index++) {
SensorsAnalyticsFlutterPlugin.track(
'ViewProduct2', <String, dynamic>{"a_time": DateTime.now(), "product_name": "Apple 12 max pro"});
}
print("track end=====");
},
),
ListTile(
title: Text('getFlushBulkSize'),
onTap: () async {
dynamic result = await SensorsAnalyticsFlutterPlugin.getFlushBulkSize();
print("getFlushBulkSize===$result");
},
),
ListTile(
title: Text('getAnonymousId'),
onTap: () async {
dynamic result = await SensorsAnalyticsFlutterPlugin.getAnonymousId();
print("getAnonymousId===$result");
},
),
ListTile(
title: Text('getLoginId'),
onTap: () async {
//SensorsAnalyticsFlutterPlugin.login("aa212132");
dynamic result = await SensorsAnalyticsFlutterPlugin.getLoginId();
print("getLoginId===$result");
},
),
ListTile(
title: Text('identify'),
onTap: () async {
SensorsAnalyticsFlutterPlugin.identify("qwe");
print("identify===");
},
),
ListTile(
title: Text('trackAppInstall'),
onTap: () async {
SensorsAnalyticsFlutterPlugin.trackAppInstall({"age": 888}, false);
print("trackAppInstall==");
},
),
ListTile(
title: Text('trackTimerStart'),
onTap: () async {
tmpResult = await SensorsAnalyticsFlutterPlugin.trackTimerStart("hello_event");
print("trackTimerStart===$tmpResult");
},
),
ListTile(
title: Text('trackTimerPause'),
onTap: () async {
SensorsAnalyticsFlutterPlugin.trackTimerPause("hello_event");
print("trackTimerPause===");
},
),
ListTile(
title: Text('trackTimerResume'),
onTap: () async {
SensorsAnalyticsFlutterPlugin.trackTimerResume("hello_event");
print("trackTimerResume===");
},
),
ListTile(
title: Text('removeTimer'),
onTap: () async {
SensorsAnalyticsFlutterPlugin.removeTimer("hello_event");
print("removeTimer===");
},
),
ListTile(
title: Text('end timer'),
onTap: () async {
SensorsAnalyticsFlutterPlugin.trackTimerEnd(tmpResult, null);
print("end timer===");
},
),
ListTile(
title: Text('flush'),
onTap: () async {
SensorsAnalyticsFlutterPlugin.flush();
print("flush===");
},
),
ListTile(
title: Text('deleteAll'),
onTap: () async {
SensorsAnalyticsFlutterPlugin.deleteAll();
print("deleteAll===");
},
),
ListTile(
title: Text('setsuperproperties'),
onTap: () async {
var map = {"superproperties_test": "flutter 注册公共属性", "aaa": "同名公共属性 aaa"};
SensorsAnalyticsFlutterPlugin.registerSuperProperties(map);
print("setSuperProperties===");
},
),
ListTile(
title: Text('getSuperProperties'),
onTap: () async {
var a = 10;
dynamic map = await SensorsAnalyticsFlutterPlugin.getSuperProperties();
print("getSuperProperties===$map");
},
),
ListTile(
title: Text('enableNetworkRequest'),
onTap: () async {
SensorsAnalyticsFlutterPlugin.enableNetworkRequest(true);
print("enableNetworkRequest===");
},
),
ListTile(
title: Text('itemSet'),
onTap: () async {
SensorsAnalyticsFlutterPlugin.itemSet("aaatype", "aaaid", {"age": 999});
print("itemSet===");
},
),
ListTile(
title: Text('itemDelete'),
onTap: () async {
SensorsAnalyticsFlutterPlugin.itemDelete("aaatype", "aaaid");
print("itemDelete===");
},
),
ListTile(
title: Text('isNetworkRequestEnable'),
onTap: () async {
dynamic result = await SensorsAnalyticsFlutterPlugin.isNetworkRequestEnable();
print("isNetworkRequestEnable===$result");
},
),
ListTile(
title: Text('logout'),
onTap: () async {
SensorsAnalyticsFlutterPlugin.logout();
print("logout===");
},
),
ListTile(
title: Text('bind'),
onTap: () async {
SensorsAnalyticsFlutterPlugin.bind("sss1", "vvv1");
print("bind===");
},
),
ListTile(
title: Text('unbind'),
onTap: () async {
SensorsAnalyticsFlutterPlugin.unbind("sss2", "vvv2");
print("unbind===");
},
),
ListTile(
title: Text('loginwithkey'),
onTap: () async {
SensorsAnalyticsFlutterPlugin.loginWithKey("sss3", "vvv3");
//SensorsAnalyticsFlutterPlugin.loginWithKey("sss3", "vvv3", {"p1111": "vvvv1"});
print("loginwithkey===");
},
),
],
),
),
);
}
}
Download Details:
Author: sensorsdata
Source Code: https://github.com/sensorsdata/sensors_analytics_flutter_plugin
Using this package you can easily retrieve data from Google Analytics.
Here are a few examples of the provided methods:
use Analytics;
use Spatie\Analytics\Period;
//fetch the most visited pages for today and the past week
Analytics::fetchMostVisitedPages(Period::days(7));
//fetch visitors and page views for the past week
Analytics::fetchVisitorsAndPageViews(Period::days(7));
Most methods will return an \Illuminate\Support\Collection object containing the results.
This package can be installed through Composer.
composer require spatie/laravel-analytics
Optionally, you can publish the config file of this package with this command:
php artisan vendor:publish --provider="Spatie\Analytics\AnalyticsServiceProvider"
The following config file will be published in config/analytics.php
return [
/*
* The view id of which you want to display data.
*/
'view_id' => env('ANALYTICS_VIEW_ID'),
/*
* Path to the client secret json file. Take a look at the README of this package
* to learn how to get this file. You can also pass the credentials as an array
* instead of a file path.
*/
'service_account_credentials_json' => storage_path('app/analytics/service-account-credentials.json'),
/*
* The amount of minutes the Google API responses will be cached.
* If you set this to zero, the responses won't be cached at all.
*/
'cache_lifetime_in_minutes' => 60 * 24,
/*
* Here you may configure the "store" that the underlying Google_Client will
* use to store it's data. You may also add extra parameters that will
* be passed on setCacheConfig (see docs for google-api-php-client).
*
* Optional parameters: "lifetime", "prefix"
*/
'cache' => [
'store' => 'file',
],
];
The first thing you'll need to do is get some credentials to use Google's APIs. I'm assuming that you've already created a Google account and are signed in. Head over to the Google APIs site and click "Select a project" in the header.
Next up we must specify which APIs the project may consume. In the API Library, click "Google Analytics API". On the next screen click "Enable".
Now that you’ve created a project that has access to the Analytics API it’s time to download a file with these credentials. Click "Credentials" in the sidebar. You’ll want to create a "Service account key".
On the next screen you can give the service account a name. You can name it anything you’d like. In the service account id you’ll see an email address. We’ll use this email address later on in this guide.
Select "JSON" as the key type and click "Create" to download the JSON file.
Save the JSON file inside your Laravel project at the location specified in the service_account_credentials_json key of the config file of this package. Because the JSON file contains potentially sensitive information, I don't recommend committing it to your git repository.
I'm assuming that you've already created an Analytics account on the Analytics site. When setting up your property, click on "Advanced options" and make sure you enable Universal Analytics.
Go to "User management" in the Admin-section of the property.
On this screen you can grant access to the email address found in the client_email key of the JSON file you downloaded in the previous step. The Analyst role is enough.
The last thing you'll have to do is fill in the view_id in the config file. You can get the right value on the Analytics site: go to "View Settings" in the Admin section of the property. You'll need the View ID displayed there.
When the installation is done you can easily retrieve Analytics data. Nearly all methods will return an Illuminate\Support\Collection instance.
Here are a few examples using periods:
//retrieve visitors and pageview data for the current day and the last seven days
$analyticsData = Analytics::fetchVisitorsAndPageViews(Period::days(7));
//retrieve visitors and pageviews for the past 6 months
$analyticsData = Analytics::fetchVisitorsAndPageViews(Period::months(6));
//retrieve sessions and pageviews with yearMonth dimension since 1 year ago
$analyticsData = Analytics::performQuery(
Period::years(1),
'ga:sessions',
[
'metrics' => 'ga:sessions, ga:pageviews',
'dimensions' => 'ga:yearMonth'
]
);
$analyticsData is a Collection in which each item is an array that holds keys date, visitors and pageViews.
If you want to have more control over the period you want to fetch data for, you can pass a startDate and an endDate to the period object.
$startDate = Carbon::now()->subYear();
$endDate = Carbon::now();
Period::create($startDate, $endDate);
public function fetchVisitorsAndPageViews(Period $period): Collection
The function returns a Collection in which each item is an array that holds keys date, visitors, pageTitle and pageViews.
public function fetchTotalVisitorsAndPageViews(Period $period): Collection
The function returns a Collection in which each item is an array that holds keys date, visitors and pageViews.
public function fetchMostVisitedPages(Period $period, int $maxResults = 20): Collection
The function returns a Collection in which each item is an array that holds keys url, pageTitle and pageViews.
public function fetchTopReferrers(Period $period, int $maxResults = 20): Collection
The function returns a Collection in which each item is an array that holds keys url and pageViews.
public function fetchUserTypes(Period $period): Collection
The function returns a Collection in which each item is an array that holds keys type and sessions.
public function fetchTopBrowsers(Period $period, int $maxResults = 10): Collection
The function returns a Collection in which each item is an array that holds keys browser and sessions.
To perform all other queries on the Google Analytics resource, use performQuery. Google's Core Reporting API provides more information on which metrics and dimensions might be used.
public function performQuery(Period $period, string $metrics, array $others = [])
You can get access to the underlying Google_Service_Analytics object:
Analytics::getAnalyticsService();
Run the tests with:
vendor/bin/phpunit
Please see the CHANGELOG for more information on what has changed recently.
Please see CONTRIBUTING for details.
If you've found a bug regarding security please mail security@spatie.be instead of using the issue tracker.
And a special thanks to Caneco for the logo ✨
Author: Spatie
Source Code: https://github.com/spatie/laravel-analytics
License: MIT license
1667016590
Umeng Analytics & Push Flutter plugin (flutter_umeng_analytics_push)
1. Automatic integration
dependencies:
flutter_umeng_analytics_push: ^1.0.2
2. Local import (choose one: download the code or git clone)
dependencies:
flutter_umeng_analytics_push:
git:
url: https://github.com/youyiio/flutter_umeng_analytics_push.git
package com.demo.umeng.app
import io.flutter.app.FlutterApplication
import com.beyongx.flutter_umeng_analytics_push.UmengAnalyticsPushFlutterAndroid
class MyFlutterApplication: FlutterApplication() {
override fun onCreate() {
super.onCreate();
UmengAnalyticsPushFlutterAndroid.androidInit(this, "umeng_app_key", "default",
false, "umeng_message_secret")
}
}
package com.demo.umeng.app
import android.os.Handler
import android.os.Looper
import android.content.Intent
import androidx.annotation.NonNull;
import io.flutter.embedding.android.FlutterActivity
import io.flutter.embedding.engine.FlutterEngine
import io.flutter.plugins.GeneratedPluginRegistrant
import com.beyongx.flutter_umeng_analytics_push.UmengAnalyticsPushFlutterAndroid
import com.beyongx.flutter_umeng_analytics_push.UmengAnalyticsPushPlugin
class MainActivity: FlutterActivity() {
var handler: Handler = Handler(Looper.myLooper())
override fun configureFlutterEngine(@NonNull flutterEngine: FlutterEngine) {
GeneratedPluginRegistrant.registerWith(flutterEngine);
}
override fun onNewIntent(intent: Intent) {
// Update and save the intent each time the app returns to the foreground, so the latest intent is always available
setIntent(intent);
super.onNewIntent(intent);
}
override fun onResume() {
super.onResume()
UmengAnalyticsPushFlutterAndroid.androidOnResume(this)
if (getIntent().getExtras() != null) {
var message = getIntent().getExtras().getString("message")
if (message != null && message != "") {
// On a cold start, wait for the Flutter engine to load, then deliver the message to the Flutter side after a 5-second delay
handler.postDelayed(object : Runnable {
override fun run() {
UmengAnalyticsPushPlugin.eventSink.success(message)
}
}, 5000)
}
}
}
override fun onPause() {
super.onPause()
UmengAnalyticsPushFlutterAndroid.androidOnPause(this)
}
}
<application
android:name="com.demo.umeng.app.MyFlutterApplication">
</application>
Modify MyFlutterApplication
package com.demo.umeng.app
import io.flutter.app.FlutterApplication
import com.beyongx.flutter_umeng_analytics_push.UmengAnalyticsPushFlutterAndroid
class MyFlutterApplication: FlutterApplication() {
override fun onCreate() {
super.onCreate();
UmengAnalyticsPushFlutterAndroid.androidInit(this, "uemng_app_key", "default",
false, "uemng_message_secret", false)
// Register Xiaomi Push (optional)
UmengAnalyticsPushFlutterAndroid.registerXiaomi(this, "xiaomi_app_id", "xiaomi_app_key")
// Register Huawei Push (optional; requires additional configuration in AndroidManifest.xml)
UmengAnalyticsPushFlutterAndroid.registerHuawei(this)
// Register Oppo Push (optional)
UmengAnalyticsPushFlutterAndroid.registerOppo(this, "oppo_app_key", "oppo_app_secret")
// Register Vivo Push (optional; requires additional configuration in AndroidManifest.xml)
UmengAnalyticsPushFlutterAndroid.registerVivo(this)
// Register Meizu Push (optional)
UmengAnalyticsPushFlutterAndroid.registerMeizu(this, "meizu_app_id", "meizu_app_key")
}
}
Configure the vendor push keys
Note that this only needs to be done in the main project: modify AndroidManifest.xml and fill in the appId and key.
<application
android:name="com.demo.umeng.app.MyFlutterApplication">
.....
<!-- Vivo push channel start (optional) -->
<meta-data
android:name="com.vivo.push.api_key"
android:value="vivo_api_key" />
<meta-data
android:name="com.vivo.push.app_id"
android:value="vivo_app_id" />
<!-- Vivo push channel end-->
<!-- Huawei push channel start (optional) -->
<meta-data
android:name="com.huawei.hms.client.appid"
android:value="appid=${huawei_app_id}" />
<!-- Huawei push channel end-->
</application>
To receive offline messages while the device is offline:
When testing manually from the Umeng console, or when calling the Umeng push API, send with the following parameters:
"mipush": true,
"mi_activity": "com.beyongx.flutter_umeng_analytics_push.OfflineNotifyClickActivity"
import UIKit
import Flutter
@UIApplicationMain
@objc class AppDelegate: FlutterAppDelegate {
override func application(_ application: UIApplication, didFinishLaunchingWithOptions launchOptions: [UIApplication.LaunchOptionsKey: Any]?) -> Bool {
GeneratedPluginRegistrant.register(with: self)
UmengAnalyticsPushFlutterIos.iosInit(launchOptions, appkey:"umeng_app_key", channel:"appstore", logEnabled:false, pushEnabled:true);
return super.application(application, didFinishLaunchingWithOptions: launchOptions)
}
// If you need to handle push notification taps, use the code below
@available(iOS 10.0, *)
override func userNotificationCenter(_ center: UNUserNotificationCenter, didReceive response: UNNotificationResponse, withCompletionHandler completionHandler: @escaping () -> Void) {
let userInfo = response.notification.request.content.userInfo
UmengAnalyticsPushFlutterIos.handleMessagePush(userInfo)
completionHandler()
}
}
#import "GeneratedPluginRegistrant.h"
#import <UMCommon/UMCommon.h>
#import <UMCommon/MobClick.h>
#import <UMPush/UMessage.h>
#import <UserNotifications/UserNotifications.h>
#import <flutter_umeng_analytics_push/UmengAnalyticsPushIos.h>
import 'package:flutter_umeng_analytics_push/flutter_umeng_analytics_push.dart';
import 'package:flutter_umeng_analytics_push/message_model.dart';
FlutterUmengAnalyticsPush.addPushMessageCallback((MessageModel message) {
print("UmengAnalyticsPush Message ======> $message");
});
import 'package:flutter_umeng_analytics_push/flutter_umeng_analytics_push.dart';
FlutterUmengAnalyticsPush.addAlias('1001', 'jobcode');
FlutterUmengAnalyticsPush.setAlias('1002', 'jobcode');
FlutterUmengAnalyticsPush.deleteAlias('1002', 'jobcode');
import 'package:flutter_umeng_analytics_push/flutter_umeng_analytics_push.dart';
FlutterUmengAnalyticsPush.addTags('manager');
FlutterUmengAnalyticsPush.deleteTags('manager');
import 'package:flutter_umeng_analytics_push/flutter_umeng_analytics_push.dart';
FlutterUmengAnalyticsPush.pageStart('memberPage');
FlutterUmengAnalyticsPush.pageEnd('memberPage');
import 'package:flutter_umeng_analytics_push/flutter_umeng_analytics_push.dart';
FlutterUmengAnalyticsPush.event('customEvent', '1000');
Run this command:
With Flutter:
$ flutter pub add flutter_umeng_analytics_push
This will add a line like this to your package's pubspec.yaml (and run an implicit flutter pub get):
dependencies:
flutter_umeng_analytics_push: ^1.0.2
Alternatively, your editor might support flutter pub get. Check the docs for your editor to learn more.
Now in your Dart code, you can use:
import 'package:flutter_umeng_analytics_push/flutter_umeng_analytics_push.dart';
Download Details:
Author:
Source Code: https://pub.dev/packages/flutter_umeng_analytics_push
1666785300
A fast, general sparse linear algebra and graph computation package, based on SuiteSparse:GraphBLAS.
using Pkg
Pkg.add("SuiteSparseGraphBLAS")
julia> using SuiteSparseGraphBLAS
# Standard arithmetic semiring (+, *) matrix multiplication
julia> s = sprand(Float64, 100000, 100000, 0.05);
julia> v = sprand(Float64, 100000, 1000, 0.1);
julia> @btime s * v
157.211 s (8 allocations: 1.49 GiB)
julia> s = GBMatrix(s); v = GBMatrix(v);
# Single-threaded
julia> @btime s * v
54.649 s (26 allocations: 1.49 GiB)
# 2 threads
julia> @btime s * v
30.258 s (26 allocations: 1.50 GiB)
# 4 threads
julia> @btime s * v
21.738 s (26 allocations: 1.54 GiB)
# Indexing
julia> s = sprand(Float64, 100000, 100000, 0.05);
julia> @btime s[1:10:end, end:-10:1]
344.355 ms (11 allocations: 76.32 MiB)
julia> s = GBMatrix(s);
julia> @btime s[1:10:end, end:-10:1]
81.750 ms (39 allocations: 152.49 MiB)
If you use SuiteSparseGraphBLAS.jl in your research please cite the following three papers:
@article{10.1145/3322125,
author = {Davis, Timothy A.},
title = {Algorithm 1000: SuiteSparse:GraphBLAS: Graph Algorithms in the Language of Sparse Linear Algebra},
year = {2019},
issue_date = {December 2019},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
volume = {45},
number = {4},
issn = {0098-3500},
url = {https://doi.org/10.1145/3322125},
doi = {10.1145/3322125},
journal = {ACM Trans. Math. Softw.},
month = {dec},
articleno = {44},
numpages = {25},
keywords = {GraphBLAS, Graph algorithms, sparse matrices}
}
@article{GraphBLAS7,
author = {Davis, Timothy A.},
title = {Algorithm 10xx: SuiteSparse:GraphBLAS: Graph Algorithms in the Language of Sparse Linear Algebra},
year = {2022},
journal = {ACM Trans. Math. Softw.},
month = {(under revision)},
note={See GraphBLAS/Doc/toms_parallel_grb2.pdf},
keywords = {GraphBLAS, Graph algorithms, sparse matrices}
}
@INPROCEEDINGS{9622789,
author={Pelletier, Michel and Kimmerer, Will and Davis, Timothy A. and Mattson, Timothy G.},
booktitle={2021 IEEE High Performance Extreme Computing Conference (HPEC)},
title={The GraphBLAS in Julia and Python: the PageRank and Triangle Centralities},
year={2021},
pages={1-7},
doi={10.1109/HPEC49654.2021.9622789},
ISSN={2643-1971},
month={Sep.}
}
This work was funded as part of Google Summer of Code over 3 summers, 2 of which were for Abhinav Mehndiratta and the last of which was for William Kimmerer.
Original author: Abhinav Mehndiratta
SuiteSparse author: Tim Davis
Mentors: Viral B Shah, Miha Zgubic, Tim Davis
Current maintainer: William Kimmerer
Author: JuliaSparse
Source Code: https://github.com/JuliaSparse/SuiteSparseGraphBLAS.jl
License: MIT license
1665780960
PHP Insights was carefully crafted to simplify the analysis of your code directly from your terminal, and is the perfect starting point to analyze the code quality of your PHP projects. It was created by Nuno Maduro, with logo design by Caneco, and is currently maintained by Chris Gmyr, Jibé Barth, and Steve McDougall.
# First, install:
composer require nunomaduro/phpinsights --dev
# Then, use it:
./vendor/bin/phpinsights
# For Laravel:
First, publish the configuration file:
php artisan vendor:publish --provider="NunoMaduro\PhpInsights\Application\Adapters\Laravel\InsightsServiceProvider"
Then, use it:
php artisan insights
For full documentation, visit phpinsights.com.
Author: Nunomaduro
Source Code: https://github.com/nunomaduro/phpinsights
License: MIT license
1664476440
Easily track user events with Google Analytics. Test UI/UX theories, compare client performance/speed, even track client-side errors. All user events are tied to all other session data in Google Analytics.
You can install gatrack.js via Bower or NPM:
bower install gatrack
npm install gatrack
Or, you can just include the dist/gatrack.min.js file anywhere on your page.
The API, on load, detects and tracks events for touch, hover, scroll, click, link and load. To specify the category or the action being taken (both optional), simply add data-attributes of gatrack-category and/or gatrack-action and/or gatrack-label and/or gatrack-value.
For an element on which you wish to track click events, add a class of ga-click-trackable.
For links (internal or outbound) for which you want to track user interaction, add a class of ga-link-trackable.
For an element on which you wish to track hover events, add a class of ga-hover-trackable.
For an element on which you wish to track load events, add a class of ga-load-trackable.
For an element on which you wish to track touch events, add a class of ga-touch-trackable.
For an element on which you wish to track scroll events, add a class of ga-scroll-trackable. You need to specify the position at which to trigger the event (either a percentage amount or a pixel distance, e.g. '30%' or '300px') by setting data-gatrack-scroll-point. For this type of event, you can also specify the scrolling direction to track ('x' or 'y') by setting data-gatrack-scroll-direction; it defaults to 'y', or vertical.
gatrack.action(element, category, action [, label, value, callback(result)])
gatrack.link(element [, category, action, label, value])
gatrack.click(element [, category, action, label, value])
gatrack.load(element [, category, action, label, value])
gatrack.touch(element [, category, action, label, value])
gatrack.hover(element [, category, action, label, value])
gatrack.scrollAt(element, scrollPoint [, scrollDirection, category, action, label, value])
Google Analytics events accept four parameters: category, action, label, and value.
In general, the event hooks look for things like an element id or title attribute to assign to the action parameter when one is not specified either explicitly or in the data-attribute of the element.
In the case of the link event, it looks for the href value in the absence of an explicit declaration or data-attribute, and the scrollAt event looks for the page title content.
gatrack.init() is available and can be used to initialize the event listeners on specified elements whenever you like.
The action hook, when given an optional callback function, returns a 'success' string on success and a traditional error object otherwise.
You can read more specifics about the event object in Google Analytics.
You can also track errors on your page through gatrack. All you'll need to do is override the native onerror function with one for gatrack.
To start recording errors, simply place the following snippet in a script tag so that it is the first code executed on your page, preferably in the head of your document.
// One-liner, minified (use this one!)
(function(g,a,t,r,a,c,k){g[r]=g[r]||{};g[r][a]=t.getTime();g[r][c]=[];g[c]=function(m,u,l,c,e){this.gatrack.onerror.push([m,u,l,c,e])}})(window,document,(new Date()),'gatrack','timer','onerror');
// Expanded, so you can see
(function(g,a,t,r,a,c,k){
g[r] = g[r] || {};
g[r][a] = t.getTime();
g[r][c] = [];
g[c] = function( m, u, l, c, e ) {
this.gatrack.onerror.push([m, u, l, c, e]);
};
})(window,document,(new Date()),'gatrack','timer','onerror');
This snippet will allow you to record errors that are raised even before any other JavaScript code is executed. The gatrack library records errors in the following format:
You betcha. Check out the provided index.html demo for working examples.
Author: Andjosh
Source Code: https://github.com/andjosh/gatrack.js
1664472420
Server side implementation of Amplitude's HTTP API.
As of 2020-05-30, Amplitude reported issues with their SSL certificate, so they set up an alternate endpoint at https://api2.amplitude.com. Read about it on Amplitude's Status Page and the affected devices here.
As of v5.1.0+, you can use the alternative endpoint by setting the environment variable:
AMPLITUDE_TOKEN_ENDPOINT = 'https://api2.amplitude.com'
Or in the constructor:
const amplitude = new Amplitude('api-token', {
tokenEndpoint: 'https://api2.amplitude.com'
})
For amplitude@5+, the library uses Amplitude's V2 HTTP API, which replaces the deprecated V1 HTTP API. This only affects the .track method. The only potential breaking change is that, by default, user_id and device_id require a minimum of 5 characters.
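For reference, and independent of this Node client, the V2 HTTP API is just an HTTP endpoint, so a raw call can be sketched in Go roughly as follows. This is a minimal sketch based on Amplitude's public docs, not this library's internals; the api key and event values are placeholders, and the /2/httpapi path should be double-checked against the current documentation.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Build the V2 HTTP API payload: an api_key plus a list of events.
	payload := map[string]interface{}{
		"api_key": "api-token", // placeholder
		"events": []map[string]interface{}{
			{
				"event_type": "some value",
				"user_id":    "some-user-id", // at least 5 characters by default
			},
		},
	}
	body, err := json.Marshal(payload)
	if err != nil {
		log.Fatal(err)
	}

	// https://api2.amplitude.com is the alternate endpoint mentioned above.
	resp, err := http.Post("https://api2.amplitude.com/2/httpapi", "application/json", bytes.NewReader(body))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}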
npm install amplitude --save
const Amplitude = require('amplitude')
// The only required field is the api token
const amplitude = new Amplitude('api-token')
See the examples/ directory for further usage.
Pass in any keys listed on the Amplitude V2 HTTP API. The only required keys are event_type and either user_id or device_id. If you initialized the Amplitude object with a user/device id, they can be omitted when calling the track method. Note: the user_id and device_id must be 5 or more characters if passed.
const data = {
event_type: 'some value', // required
user_id: 'some-user-id', // only required if device id is not passed in
device_id: 'some-device-id', // only required if user id is not passed in
session_id: 1492789357923, // must be unix timestamp in ms, not required
event_properties: {
//...
},
user_properties: {
//...
}
}
try {
await amplitude.track(data)
} catch (err) {
console.error(err)
}
You can also pass an array of event objects:
const data = [
{
event_type: 'some value', // required
user_id: 'some id', // only required if device id is not passed in
device_id: 'some id', // only required if user id is not passed in
event_properties: {
//...
},
user_properties: {
//...
}
},
{
event_type: 'another value', // required
user_id: 'some id', // only required if device id is not passed in
device_id: 'some id', // only required if user id is not passed in
event_properties: {
//...
},
user_properties: {
//...
}
}
]
amplitude.track(data).then(res => {
console.log('Amplitude response', res)
})
The identify method allows you to make changes to a user without sending an analytics event.
const data = {
user_id: 'some id', // only required if device id is not passed in
device_id: 'some id', // only required if user id is not passed in
event_properties: {
//...
},
user_properties: {
//...
}
}
amplitude.identify(data).then(res => {
console.log('Amplitude response', res)
})
You can also pass an array of identify objects:
const data = [
{
user_id: 'some id', // only required if device id is not passed in
device_id: 'some id', // only required if user id is not passed in
event_properties: {
//...
},
user_properties: {
//...
}
},
{
user_id: 'some id', // only required if device id is not passed in
device_id: 'some id', // only required if user id is not passed in
event_properties: {
//...
},
user_properties: {
//...
}
}
]
amplitude.identify(data).then(res => {
console.log('Amplitude response', res)
})
With this method, you can also modify user properties using property operations.
const data = {
user_id: 'some id', // only required if device id is not passed in
device_id: 'some id', // only required if user id is not passed in
user_properties: {
$set: {
//...
},
$add: {
//...
},
$append: {
//...
}
}
}
amplitude.identify(data).then(res => {
console.log('Amplitude response', res)
})
Note the limitation of mixing user property operations with top-level properties: if you use any property operations ($add, $append, etc.) and you want to set a user property, it must be done using the $set operation.
If you prefer camelCase variables, you can pass in the camelCase version instead to the track and identify methods:
const data = {
eventType: 'some value', // required
userId: 'some id', // only required if device id is not passed in
deviceId: 'some id', // only required if user id is not passed in
sessionId: 1492789357923, // must be unix timestamp in ms, not required
eventProperties: {
//...
},
userProperties: {
//...
}
}
amplitude.track(data).then(res => {
console.log('Amplitude response', res)
})
This is the full list of properties that will be automatically transformed:
userId -> user_id
deviceId -> device_id
sessionId -> session_id
eventType -> event_type
eventProperties -> event_properties
userProperties -> user_properties
appVersion -> app_version
osName -> os_name
osVersion -> os_version
deviceBrand -> device_brand
deviceManufacturer -> device_manufacturer
deviceModel -> device_model
locationLat -> location_lat
locationLng -> location_lng
If the user/device/session id will always be the same, you can initialize the object with it. Passing a user id or device id in the track and identify methods will override the default value set at initialization.
const amplitude = new Amplitude('api-token', { user_id: 'some-user-id' })
// or
const amplitude = new Amplitude('api-token', { device_id: 'some-device-id' })
// or
const amplitude = new Amplitude('api-token', { session_id: 1492789357923 })
try {
await amplitude.track({
event_type: 'some value'
})
} catch (err) {
console.error(err)
}
// Or...
amplitude
.track({
event_type: 'some value',
user_id: 'will-override-the-default-id'
})
.then(res => {
console.log('Amplitude response', res)
})
All methods return a Promise.
amplitude
.track(data)
.then(function(result) {
//... do something
})
.catch(function(error) {
//... do something
})
// Or..
try {
const result = await amplitude.track({
event_type: 'some value'
})
//... do something with result
} catch (error) {
console.error(error)
//... do something with the error
}
The export method requires your secret key to be added when initializing the amplitude object. This method uses the Export API and requires a start and end string in the format YYYYMMDDTHH.
The method returns a stream.
const fs = require('fs')
const stream = fs.createWriteStream('./may-2016-export.zip')
const amplitude = new Amplitude('api-token', { secretKey: 'secret' })
amplitude
.export({
start: '20160501T20',
end: '20160601T20'
})
.pipe(stream)
The user search method requires your secret key to be added when initializing the amplitude object. This method uses the dashboard api.
Search for a user with a specified Amplitude ID, Device ID, User ID, or User ID prefix.
const amplitude = new Amplitude('api-token', { secretKey: 'secret' })
amplitude.userSearch('user/device/amplitude id or user id prefix').then(res => {
const matches = res.matches // Array of matches
// How the match was made
// If exact match was made with user id or device id, type === 'match_user_or_device_id'
// If exact match was made with Amplitude ID, type === 'match_amplitude_id'
// If a partial match was made with a user id prefix, type === 'match_user_prefix'
// If no match was made, type === 'nomatch'
const type = res.type
})
The user activity method requires your secret key to be added when initializing the amplitude object. This method uses the dashboard api.
Get a user summary and their recent events. This method requires an Amplitude ID. You can use the user search method to find that.
const amplitude = new Amplitude('api-token', { secretKey: 'secret' })
amplitude.userActivity('Amplitude ID').then(function(res) {
const userData = res.userData // data about the user
const events = res.events // an array of events associated with the user
})
If there is nothing found for the passed Amplitude ID, the Promise will still resolve. The userData object will contain empty values and the events array will be empty:
{
userData: {
num_sessions: 0,
purchases: 0,
revenue: 0,
merged_amplitude_ids: [],
num_events: 0,
canonical_amplitude_id: 1,
user_id: null,
last_location: null,
usage_time: 0,
last_device_id: null,
device_ids: []
},
events: []
}
If you do not know the Amplitude ID, you can use the userSearch method to find it.
const amplitude = new Amplitude('api-token', { secretKey: 'secret' })
amplitude
.userSearch('user-id')
.then(function(res) {
// If you're using a prefix, you may get multiple matches and
// you may need to handle the case where there is not a match
const match = res.matches[0]
return amplitude.userActivity(match.amplitude_id)
})
.then(function(res) {
const userData = res.userData // data about the user
const events = res.events // an array of events associated with the user
})
The event segmentation method requires your secret key to be added when initializing the amplitude object. This method uses the dashboard api.
Get metrics for an event with segmentation.
const amplitude = new Amplitude('api-token', { secretKey: 'secret' })
amplitude
.eventSegmentation({
e: {
event_type: 'event_name'
},
start: '20170104',
end: '20170117'
})
.then(res => {
const segmentationData = res.data
})
Example response:
{ series: [ [ 2, 25, 3, 1, 0, 0, 2, 3, 5, 1, 0, 0, 0, 0 ] ],
seriesLabels: [ 0 ],
xValues:
[ '2017-01-04',
'2017-01-05',
'2017-01-06',
'2017-01-07',
'2017-01-08',
'2017-01-09',
'2017-01-10',
'2017-01-11',
'2017-01-12',
'2017-01-13',
'2017-01-14',
'2017-01-15',
'2017-01-16',
'2017-01-17' ] }
If the event does not exist, Amplitude will throw a 400 error.
View the CHANGELOG for changes in each version.
Author: Geoffdutton
Source Code: https://github.com/geoffdutton/amplitude
License: ISC license
1663398180
In this article, you will see 30 Data Analytics and Tracking projects on the Near Network
1. NEAR Explorer
Name | Description | Website |
NEAR Explorer | NEAR blockchain explorer is an online blockchain browser which displays the contents of individual NEAR blocks and transactions and the transaction histories and balances of addresses. | https://explorer.near.org/ |
2. NEARSCAN
Name | Description | Website |
NEARSCAN | NEARSCAN allows you to explore and search the NEAR blockchain for blocks, transactions, and accounts. | https://www.nearscan.org/home |
3. NEAR-STAKING
Name | Description | Website |
NEAR-STAKING | See how decentralization is going on the NEAR Network and monitor whether the pool you've delegated to is increasing or decreasing in stake. | https://near-staking.com/ |
4. NearTracker
Name | Description | Website |
NearTracker | NearTracker is a collaborative analytics and statistics project between NearBots.com, the leading provider of automated bots for NEAR NFT projects, and Bullish Bulls, whose vision is to create a community where everyone has a voice and to provide additional value to each other and the NEAR ecosystem as a whole. | https://neartracker.io/ |
5. Nearscope
Name | Description | Website |
Nearscope | Dashboard and explorer for NEAR delegators and validators. | https://nearscope.net/ |
6. NearScanner
Name | Description | Website |
NearScanner | NearScanner is a free-to-use NFT stats aggregator and alerting system primarily based on the Paras marketplace on $NEAR. NearScanner uses tracking tools to optimize and monitor the product's performance using privacy-friendly tracking such as Plausible Analytics. NearScanner also offers NEAR wallet connection to authenticate logged-in users and to ensure alerts are being fairly used across platforms. There will never be any situation where NearScanner will use any funds from your wallet. Always vet tools, including NearScanner, before you take actions, and always review the confirmation page on NEAR, making sure to review wallet integration permissions before you accept them. | https://www.nearscanner.com/ |
7. NearBlocks
Name | Description | Website |
NearBlocks | NearBlocks is a Blockchain Explorer and Analytics Platform for Near Protocol, a new blockchain and smart transaction platform. | https://nearblocks.io/ |
8. Token Terminal
Name | Description | Website |
Token Terminal | Fundamentals for crypto/Web3. Token Terminal is a platform that aggregates financial data on blockchains and decentralized applications (dapps) that run on blockhains. | https://tokenterminal.com/ |
9. DappRadar
Name | Description | Website |
DappRadar | DappRadar provides information and insights about all the existing dapps! At the tap of your finger, find the most successful and used decentralized games, casinos, and marketplaces, among others. In DappRadar, dapps can be sorted by many different metrics, daily users, daily volume, and more! | https://dappradar.com/ |
10. Ref Analytics
Name | Description | Website |
Ref Analytics | Ref Analytics (Sodaki) is a minimal data analytics web app, built on top of Ref Finance, the first Uniswap-like and multi-purpose Decentralized Finance (DeFi) platform on NEAR Protocol. | https://stats.ref.finance/ |
11. stats.gallery
Name | Description | Website |
stats.gallery | Big & fun NEAR account analytics. | https://stats.gallery/ |
12. NFT Reactor
Name | Description | Website |
NFT Reactor | NFT collection builder on NEAR. | https://reactor.art/ |
13. NFData
Name | Description | Website |
NFData | NFT portfolio management and NFT Market Data. | https://nonfungibledata.io/ |
14. Coinhall
Name | Description | Website |
Coinhall | Realtime Charts, DEX Aggregator & Analytics. | https://coinhall.org/ |
15. SimpleFi
Name | Description | Website |
SimpleFi | SimpleFi is a power tool that helps you make better DeFi investments. SimpleFi’s core mission is to make life easier for DeFi investors, so you can cut through the data, the hype and the scams to make the best investments you can. In that sense it’s part smart DeFi dashboard, part suggestion engine, and part magic investment migration tool. | https://simplefi.finance/ |
16. Dapplooker
Name | Description | Website |
Dapplooker | Dapplooker aims to empower NEAR network dapps to easily understand smart contracts data, do analytics and easily build beautiful explorers, charts and dashboards using simple to use visual SQL editor. Charts and dashboards can be forked and shared with everyone. | https://dapplooker.com/ |
17. Flipside Crypto
Name | Description | Website |
Flipside Crypto | Some say NEAR is an undervalued layer 1. We don't disagree. Its innovative infrastructure is improving the user experience in a way that many have noticed and that has attracted projects such as Mintbase, for NFT minting, and Flux, which enables users to create their own markets based on assets and real world-events. Giving developers these unique tools to build is only going to drive more growth, and we're here for it. We are actively working on making NEAR's on-chain data available to everyone for free, so analytics creation can be activated and rewarded via MetricsDAO. If you'd like to get involved, join the conversation in the MetricsDAO Discord. | https://flipsidecrypto.xyz/near |
18. Paras Analytics
Name | Description | Website |
Paras Analytics | Data & Analytics on Paras. AnalyticsNFTUtilities Paras Analytics official and social links About Paras Analytics What is Paras Analytics? What is ? Where can I get Paras Analytics tokens? Who invested Paras Analytics? How does Paras Analytics compare to other projects? Check out the volume and transactions from Paras, a digital collectible marketplace that supports and develops crypto-native IPs. | https://stats.paras.id/ |
19. Defy Trends
Name | Description | Website |
Defy Trends | Defy Trends is building a robust and reliable analytics platform that includes both on- and off-chain analytics for coins and tokens across chains. For our beginner and intermediate users, our data insights are curated with the intention of showing actionable, digestible insights from non-biased data, while our pro subscribers enjoy a larger product suite. Our off-chain analytics include sentiment, news, and Github analysis while our on-chain analytics range from data on addresses, HODL metrics, and whale tracking to data on derivatives. The user is able to graph indicators from both on- and off-chain data on the same graph. This is presented on an intuitive and inviting UI, with a high focus on UX and educational components that support the user in understanding the various indicators and their context in the greater market or an investment strategy. | https://www.defytrends.io/ |
20. The Unit
Name | Description | Website |
The Unit | The Unit is a decentralized Web3 index designed to become the unit of account of the Metaverse — bringing long-term stability to crypto. We compare The Unit index in crypto to the S&P 500 in the US stock market, but in this case, we also see The Unit as a crypto unit of account. | https://www.theunit.one/ |
21. Plex
Name | Description | Website |
Plex | Discover, Manage & Engage Audiences in Web3. | https://www.plexlabs.io/ |
22. WOMBI
Name | Description | Website |
WOMBI | Wombi helps DApps optimize marketing spending, measure traction, analyze the competitive landscape, and find more high-quality users on-chain. Wombi’s unique offering is bridging the gap between analytics for Web 2.0 and Web3. Wombi connects the off-chain and on-chain knowledge about users of DApps to understand where they come from (geo, channel) and what they do on-chain while giving the answers instantly, without the heavy configuration required in traditional analytical tools. | https://wombi.xyz/ |
23. DAOSTATS
Name | Description | Website |
DAOSTATS | DAOSTATS is a simple dashboard to get insights about different DAOs. | https://daostats.io/astro/general-info |
24. UniWhales
Name | Description | Website |
UniWhales | UniWhales: tracking the whales of Uniswap. Realtime whale data. Track large-volume transactions and traders on Uniswap before everyone else knows what they are up to. | https://app.cielo.finance/near/bridge-tracker |
25. Datrics
Name | Description | Website |
Datrics | Datrics empowers blockchain teams with meaningful insights from on-chain and off-chain data. We apply our expertise in blockchain and data analytics to provide bespoke solutions for your business. The Datrics team provides analytical services for a wide range of projects, including L1 and L2 protocols, DEXs, stablecoins, DApps, and DAOs. You can use the Datrics low-code tool to get data from smart contracts, analyse it, and push data back to the smart contract. | https://datrics.ai/ |
26. AssetDash
Name | Description | Website |
AssetDash | AssetDash supports over 100 different platforms, blockchains, and wallets to make it easy to track all of your investments in real time. You can now enter your NEAR address and track your NFT portfolio in real time on AssetDash. You can use AssetDash privately and anonymously without needing to share your personal information. Just provide an email address and get started. | https://www.assetdash.com/ |
27. ArtCentral
Name | Description | Website |
ArtCentral | Find tons of insights about sales history and market prices on various marketplaces conveniently compiled for you. Our NFT appraisal algorithm and underpriced NFT tools will help you identify exactly when to buy or sell. | https://artcentral.io/ |
Top exchanges for token-coin trading on NEAR ecosystem: ☞ Binance ☞ Poloniex ☞ Bitfinex ☞ Huobi ☞ MXC ☞ ProBIT ☞ Gate.io
Read more: Infrastructure and Dev Tooling projects on Near Network
Thank you for reading!
1663227420
In today's post we will learn about 6 Favorite Libraries for Search and Analytic Databases in Go.
What is an Analytical Database?
Analytical database software specializes in big data management for business applications and services. Analytical databases are optimized to provide quick query response times and advanced analytics. They are also more scalable than traditional databases, and are often columnar databases that can efficiently write and read data to and from disk storage in order to speed up query times. Analytical database features include column-based storage, in-memory loading of compressed data, and the ability to search data across multiple attributes.
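To make the column-based storage point concrete, here is a toy Go sketch, not tied to any particular database, contrasting a row-oriented layout with a column-oriented one; the struct fields and sample values are made up. An aggregate over a single attribute only has to scan one contiguous slice in the columnar form.
package main

import "fmt"

// Row-oriented layout: one struct per record.
type EventRow struct {
	UserID  string
	Country string
	Revenue float64
}

// Column-oriented layout: one slice per attribute. Scanning a single
// column (e.g. Revenue for a SUM) touches only contiguous memory.
type EventColumns struct {
	UserID  []string
	Country []string
	Revenue []float64
}

// sumRevenue aggregates a single column without touching the other attributes.
func sumRevenue(c EventColumns) float64 {
	var total float64
	for _, r := range c.Revenue {
		total += r
	}
	return total
}

func main() {
	cols := EventColumns{
		UserID:  []string{"u1", "u2", "u3"},
		Country: []string{"US", "DE", "JP"},
		Revenue: []float64{10, 4.5, 12},
	}
	fmt.Println("total revenue:", sumRevenue(cols)) // 26.5
}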
Table of contents: Elastic, elasticsql, Elastigo, go-elasticsearch, Goes, Skizze.
Elastic is an Elasticsearch client for the Go programming language.
The release branches (e.g. release-branch.v7) are actively being worked on and can break at any time. If you want to use stable versions of Elastic, please use Go modules.
Here's the version matrix:
Elasticsearch version | Elastic version | Package URL | Remarks |
---|---|---|---|
7.x | 7.0 | github.com/olivere/elastic/v7 (source doc) | Use Go modules. |
6.x | 6.0 | github.com/olivere/elastic (source doc) | Use a dependency manager (see below). |
5.x | 5.0 | gopkg.in/olivere/elastic.v5 (source doc) | Actively maintained. |
2.x | 3.0 | gopkg.in/olivere/elastic.v3 (source doc) | Deprecated. Please update. |
1.x | 2.0 | gopkg.in/olivere/elastic.v2 (source doc) | Deprecated. Please update. |
0.9-1.3 | 1.0 | gopkg.in/olivere/elastic.v1 (source doc) | Deprecated. Please update. |
Example:
You have installed Elasticsearch 7.0.0 and want to use Elastic. As listed above, you should use Elastic 7.0 (code is in release-branch.v7).
To use the required version of Elastic in your application, you should use Go modules to manage dependencies. Make sure to use a version such as 7.0.0 or later.
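For instance, a minimal go.mod pinning Elastic 7 might look like the sketch below; the module name and Go version are placeholders, and you would normally pick the latest v7.0.x tag rather than v7.0.0.
module example.com/myapp

go 1.17

require github.com/olivere/elastic/v7 v7.0.0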
To use Elastic, import:
import "github.com/olivere/elastic/v7"
Elastic 7.0 targets Elasticsearch 7.x which was released on April 10th 2019.
As always with a major version, there are a lot of breaking changes. We will use this as an opportunity to clean up and refactor Elastic, as we already did in earlier (major) releases.
Elastic 6.0 targets Elasticsearch 6.x which was released on 14th November 2017.
Notice that there are a lot of breaking changes in Elasticsearch 6.0 and we used this as an opportunity to clean up and refactor Elastic as we did in the transition from earlier versions of Elastic.
This tool converts SQL to Elasticsearch DSL.
Currently supported:
go get -u github.com/cch123/elasticsql
Demo :
package main
import (
"fmt"
"github.com/cch123/elasticsql"
)
var sql = `
select * from aaa
where a=1 and x = '三个男人'
and create_time between '2015-01-01T00:00:00+0800' and '2016-01-01T00:00:00+0800'
and process_id > 1 order by id desc limit 100,10
`
func main() {
dsl, esType, _ := elasticsql.Convert(sql)
fmt.Println(dsl)
fmt.Println(esType)
}
will produce the following DSL, followed by the table name aaa (the esType):
{
"query": {
"bool": {
"must": [
{
"match": {
"a": {
"query": "1",
"type": "phrase"
}
}
},
{
"match": {
"x": {
"query": "三个男人",
"type": "phrase"
}
}
},
{
"range": {
"create_time": {
"from": "2015-01-01T00:00:00+0800",
"to": "2016-01-01T00:00:00+0800"
}
}
},
{
"range": {
"process_id": {
"gt": "1"
}
}
}
]
}
},
"from": 100,
"size": 10,
"sort": [
{
"id": "desc"
}
]
}
aaa
If your SQL contains reserved keywords, e.g. order or timestamp, don't forget to escape these fields as follows:
select * from `order` where `timestamp` = 1 and `desc`.id > 0
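As a small follow-up, the same Convert call can be used on that escaped query; this sketch simply checks the returned error instead of discarding it as the earlier demo does.
package main

import (
	"fmt"
	"log"

	"github.com/cch123/elasticsql"
)

func main() {
	// Reserved words such as `order`, `timestamp` and `desc` are escaped with backticks.
	sql := "select * from `order` where `timestamp` = 1 and `desc`.id > 0"

	dsl, table, err := elasticsql.Convert(sql)
	if err != nil {
		log.Fatal(err) // unlike the earlier demo, don't ignore the error
	}
	fmt.Println(table) // the target table (esType) name
	fmt.Println(dsl)   // the generated Elasticsearch query DSL
}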
A Go (Golang) based Elasticsearch client, implements core api for Indexing and searching.
NOTE: Based on the great work from Jeremy Shute, Elastigo now supports multiple connections. We attempted to make this backwards compatible; however, in the end it wasn't possible, so we tagged the older single-connection code as v1.0 and started work on v2.0.
If you want to use v1.0, you can use a tool like GoDep to make that possible. See http://bit.ly/VLG2et for full details.
The godep tool saves the exact version of the dependencies you’re building your project against, which means that upstream modifications in third-party dependencies won’t break your build.
go get github.com/tools/godep
Now, to pull in an existing project with godep:
godep go get github.com/myuser/myproject
When your code compiles in your workspace, like so:
cd $HOME/gopath/src/github.com/myuser/myproject
# hack hack hack
go build ./...
You can freeze your dependencies thusly:
godep save github.com/myuser/myproject
git add Godeps
The godep tool will examine your code to find and save the transitive closure of your dependencies in the current directory, observing their versions. If you want to restore or update these versions, see the documentation for the tool.
Note, in particular, that if your current directory contains a group of binaries or packages, you may save all of them at once:
godep save ./...
To get the Chef-based Vagrantfile working, be sure to pull like so:
# This will pull submodules.
git clone --recursive git@github.com:mattbaird/elastigo.git
It's easier to use the ElasticSearch provided Docker image found here: https://github.com/dockerfile/elasticsearch
Non-persistent usage is:
docker run -d -p 9200:9200 -p 9300:9300 dockerfile/elasticsearch
Quick Start with Docker
Make sure docker is installed. If you are running docker on a mac, you must expose ports 9200 and 9300. Shut down docker:
boot2docker stop
and run
for i in {9200..9300}; do
VBoxManage modifyvm "boot2docker-vm" --natpf1 "tcp-port$i,tcp,,$i,,$i";
VBoxManage modifyvm "boot2docker-vm" --natpf1 "udp-port$i,udp,,$i,,$i";
done
The following will allow you to get the code, and run the tests against your docker based non-persistent elasticsearch:
docker run -d -p 9200:9200 -p 9300:9300 dockerfile/elasticsearch
git clone git@github.com:mattbaird/elastigo.git
cd elastigo
go get -u ./...
cd lib
go test -v -host localhost -loaddata
cd ..
go test -v ./...
Language clients are forward compatible, meaning that clients support communicating with greater or equal minor versions of Elasticsearch. Elasticsearch language clients are only backwards compatible with default distributions, and no guarantees are made.
When using Go modules, include the version in the import path, and specify either an explicit version or a branch:
require github.com/elastic/go-elasticsearch/v8 v8.0.0
require github.com/elastic/go-elasticsearch/v7 7.17
It's possible to use multiple versions of the client in a single project:
// go.mod
github.com/elastic/go-elasticsearch/v7 v7.17.0
github.com/elastic/go-elasticsearch/v8 v8.0.0
// main.go
import (
elasticsearch7 "github.com/elastic/go-elasticsearch/v7"
elasticsearch8 "github.com/elastic/go-elasticsearch/v8"
)
// ...
es7, _ := elasticsearch7.NewDefaultClient()
es8, _ := elasticsearch8.NewDefaultClient()
The main branch of the client is compatible with the current master branch of Elasticsearch.
Add the package to your go.mod file:
require github.com/elastic/go-elasticsearch/v8 main
Or, clone the repository:
git clone --branch main https://github.com/elastic/go-elasticsearch.git $GOPATH/src/github.com/elastic/go-elasticsearch
A complete example:
mkdir my-elasticsearch-app && cd my-elasticsearch-app
cat > go.mod <<-END
module my-elasticsearch-app
require github.com/elastic/go-elasticsearch/v8 main
END
cat > main.go <<-END
package main
import (
"log"
"github.com/elastic/go-elasticsearch/v8"
)
func main() {
es, _ := elasticsearch.NewDefaultClient()
log.Println(elasticsearch.Version)
log.Println(es.Info())
}
END
go run main.go
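Building on the complete example above, a basic search request with the same client might look roughly like this; the "articles" index and the match query are illustrative, and the JSON response body is left unparsed for brevity.
package main

import (
	"log"
	"strings"

	"github.com/elastic/go-elasticsearch/v8"
)

func main() {
	es, err := elasticsearch.NewDefaultClient()
	if err != nil {
		log.Fatal(err)
	}

	// Query a hypothetical "articles" index with a simple match query.
	res, err := es.Search(
		es.Search.WithIndex("articles"),
		es.Search.WithBody(strings.NewReader(`{"query":{"match":{"title":"golang"}}}`)),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()
	log.Println(res.Status())
}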
Goes: a library to interact with Elasticsearch.
You will find examples in example_test.go
Skizze ([ˈskɪt͡sə]: German for sketch) is a sketch data store designed to deal with all problems around counting and sketching using probabilistic data structures.
Unlike a Key-Value store, Skizze does not store values, but rather appends values to defined sketches, allowing one to solve frequency and cardinality queries in near O(1) time, with minimal memory footprint.
Current status ==> Alpha (tagged v0.0.2)
For example, range-count queries of the form SELECT count(v) WHERE v >= c1 AND v < c2.
To build and run the server:
make dist
./bin/skizze
Two bindings are currently available:
Go: go get github.com/skizzehq/goskizze/skizze (see its documentation)
Node.js: npm install --save skizze (see its documentation)
Skizze comes with a CLI to help test and explore the server. It can be run via
./bin/skizze-cli
Create a new Domain (Collection of Sketches):
#CREATE DOM $name $estCardinality $topk
CREATE DOM demostream 10000000 100
Add values to the domain:
#ADD DOM $name $value1, $value2 ....
ADD DOM demostream zod joker grod zod zod grod
Get the cardinality of the domain:
# GET CARD $name
GET CARD demostream
# returns:
# Cardinality: 9
Get the rankings of the domain:
# GET RANK $name
GET RANK demostream
# returns:
# Rank: 1 Value: zod Hits: 3
# Rank: 2 Value: grod Hits: 2
# Rank: 3 Value: joker Hits: 1
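To give a feel for how a sketch store can answer frequency questions like the ranks above without storing the raw values, here is a toy count-min sketch in Go. It is only a conceptual illustration and is unrelated to Skizze's actual internals; the row/width parameters and the hashing scheme are arbitrary choices.
package main

import (
	"fmt"
	"hash/fnv"
)

// CountMin holds a small grid of counters; each row uses a differently salted hash.
type CountMin struct {
	rows  int
	width int
	table [][]uint64
}

func NewCountMin(rows, width int) *CountMin {
	t := make([][]uint64, rows)
	for i := range t {
		t[i] = make([]uint64, width)
	}
	return &CountMin{rows: rows, width: width, table: t}
}

// index derives a per-row bucket by salting an FNV-1a hash with the row number.
func (c *CountMin) index(row int, v string) int {
	h := fnv.New64a()
	fmt.Fprintf(h, "%d:%s", row, v)
	return int(h.Sum64() % uint64(c.width))
}

// Add increments one counter per row for the given value.
func (c *CountMin) Add(v string) {
	for r := 0; r < c.rows; r++ {
		c.table[r][c.index(r, v)]++
	}
}

// Count returns an estimate that may overestimate but never underestimates.
func (c *CountMin) Count(v string) uint64 {
	min := ^uint64(0)
	for r := 0; r < c.rows; r++ {
		if n := c.table[r][c.index(r, v)]; n < min {
			min = n
		}
	}
	return min
}

func main() {
	cm := NewCountMin(4, 1024)
	for _, v := range []string{"zod", "joker", "grod", "zod", "zod", "grod"} {
		cm.Add(v)
	}
	fmt.Println("zod:", cm.Count("zod"))   // 3
	fmt.Println("grod:", cm.Count("grod")) // 2
}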
Thank you for following this article.