Commit 38835379 authored by Sameer Al-Sakran

Merge branch 'master' into update_docs

parents dcd53d3d d1e66dc2
Showing 359 additions and 31 deletions
@@ -4,24 +4,31 @@
(put 'defannotation 'clojure-doc-string-elt 2)
(put 'defendpoint 'clojure-doc-string-elt 3)
(put 'defhook 'clojure-doc-string-elt 2)
(put 'defna 'clojure-doc-string-elt 2)
(put 'defne 'clojure-doc-string-elt 2)
(put 'defsetting 'clojure-doc-string-elt 2)
;; Define custom indentation for functions inside metabase.
;; This list isn't complete; add more forms as we come across them.
(define-clojure-indent
(api-let 2)
(assert 1)
(assoc* 1)
(auto-parse 1)
(catch-api-exceptions 0)
(check 1)
(checkp 1)
(conda 0)
(context 2)
(create-database-definition 1)
(dataset-case 0)
(execute-query 1)
(execute-sql! 2)
(expect 1)
(expect-eval-actual-first 1)
(expect-expansion 0)
(expect-let 1)
(expect-when-testing-against-dataset 1)
(expect-when-testing-dataset 1)
(expect-when-testing-mongo 1)
(expect-with-all-drivers 1)
(expect-with-dataset 1)
@@ -32,10 +39,22 @@
(let-500 1)
(match 1)
(match-$ 1)
(matcha 1)
(matche 1)
(matchu 1)
(macrolet 1)
(org-perms-case 1)
(pdoseq 1)
(post-insert 1)
(post-select 1)
(post-update 1)
(pre-cascade-delete 1)
(pre-insert 1)
(pre-update 1)
(project 1)
(qp-expect-with-all-datasets 1)
(qp-expect-with-datasets 1)
(query-with-temp-db 1)
(resolve-private-fns 1)
(symbol-macrolet 1)
(sync-in-context 2)
@@ -19,6 +19,9 @@ profiles.clj
/*.lock.db
/*.trace.db
/resources/frontend_client/app/dist/
/resources/frontend_client/index.html
/node_modules/
/.babel_cache
/coverage
/resources/sample-dataset.db.trace.db
/deploy/artifacts/*
#!/bin/bash
echo "Running 'npm install' to download javascript dependencies..." &&
npm install &&
if [ -n "$CI_DISABLE_WEBPACK_MINIFICATION" ]; then
echo "Running 'webpack' to assemble and minify frontend assets..."
./node_modules/webpack/bin/webpack.js
else
echo "Running 'webpack -p' to assemble and minify frontend assets..."
./node_modules/webpack/bin/webpack.js -p
fi &&
if [ -f resources/sample-dataset.db.mv.db ]; then
echo "Sample Dataset already generated."
else
echo "Running 'lein generate-sample-dataset' to generate the sample dataset..."
lein generate-sample-dataset
fi &&
echo "Running 'lein uberjar'..." &&
lein uberjar
@@ -4,12 +4,28 @@ machine:
java:
version:
oraclejdk8
python:
version: 2.7.3
dependencies:
override:
- lein deps
- pip install awscli==1.7.3
database:
post:
# MySQL doesn't load named timezone information automatically; run this command to load it
- mysql_tzinfo_to_sql /usr/share/zoneinfo | mysql -u ubuntu mysql
test:
override:
-# 0) runs unit tests w/ H2 local DB. Runs against both Mongo + H2 test datasets
-# 1) runs unit tests w/ Postgres local DB. Only runs against H2 test dataset so we can be sure tests work in either scenario
-# 2) runs Eastwood linter + Bikeshed linter
-# 3) runs JS linter + JS test
-# 4) runs lein uberjar
-- case $CIRCLE_NODE_INDEX in 0) MB_TEST_DATASETS=generic-sql,mongo lein test ;; 1) MB_DB_TYPE=postgres MB_DB_DBNAME=circle_test MB_DB_PORT=5432 MB_DB_USER=ubuntu MB_DB_HOST=localhost lein test ;; 2) lein eastwood && lein bikeshed --max-line-length 240 ;; 3) npm run lint && npm run build && npm run test ;; 4) CI_DISABLE_WEBPACK_MINIFICATION=1 lein uberjar ;; esac:
+# 0) runs unit tests w/ H2 local DB. Runs against Mongo, H2, Postgres
+# 1) runs unit tests w/ Postgres local DB. Runs against H2, MySQL
+# 2) runs Eastwood linter
+# 3) Bikeshed linter
+# 4) runs JS linter + JS test
+# 5) runs ./build-uberjar
+- case $CIRCLE_NODE_INDEX in 0) MB_TEST_DATASETS=h2,mongo,postgres lein test ;; 1) MB_TEST_DATASETS=h2,mysql MB_DB_TYPE=postgres MB_DB_DBNAME=circle_test MB_DB_PORT=5432 MB_DB_USER=ubuntu MB_DB_HOST=localhost lein test ;; 2) lein eastwood ;; 3) lein bikeshed --max-line-length 240 ;; 4) npm install && npm run lint && npm run build && npm run test ;; 5) CI_DISABLE_WEBPACK_MINIFICATION=1 ./build-uberjar ;; esac:
parallel: true
deployment:
master:
branch: master
commands:
- ./deploy/deploy_aws.sh $STACK_ID $APP_ID
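Any single slice of the test matrix above can be reproduced locally by exporting the same variables by hand. For example, node 1's command, assuming local Postgres and MySQL services set up like CircleCI's:

    MB_TEST_DATASETS=h2,mysql MB_DB_TYPE=postgres MB_DB_DBNAME=circle_test \
      MB_DB_PORT=5432 MB_DB_USER=ubuntu MB_DB_HOST=localhost lein test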
option_settings:
- namespace: aws:elasticbeanstalk:command
option_name: Timeout
value: 600
commands:
test_command:
command: sed -i 's/location \/ {/location \/ {\nif ($http_x_forwarded_proto != "https") {\n set $var "redirect";\n}\n\nif ($request_uri = "\/api\/health") {\n set $var "${var}_health";\n}\n\nif ($var = 'redirect') {\n rewrite ^ https:\/\/$host$request_uri? permanent;\n}\n/' *-proxy.conf
cwd: /etc/nginx/sites-available
ignoreErrors: true
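For readability, the fragment the sed command above injects at the top of nginx's `location / {` block looks roughly like this (reconstructed from the substitution text; exact whitespace may differ):

    if ($http_x_forwarded_proto != "https") {
        set $var "redirect";
    }
    if ($request_uri = "/api/health") {
        set $var "${var}_health";
    }
    if ($var = 'redirect') {
        rewrite ^ https://$host$request_uri? permanent;
    }

Health checks escape the HTTPS redirect because their $var becomes "redirect_health", which no longer matches 'redirect'.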
FROM ubuntu:trusty
ENV LC_ALL C
ENV LANG C.UTF-8
ENV DEBIAN_FRONTEND noninteractive
ENV DEBCONF_NONINTERACTIVE_SEEN true
ENV MB_JETTY_PORT 3000
# basic update of our system + adding Java
RUN apt-get update && \
apt-get install -y openjdk-7-jre
# include our local build in the image
# TODO: eventually we could probably set this up to download the jar file dynamically
COPY ./metabase-standalone.jar /app/
COPY ./run_metabase.sh /app/
RUN chmod 755 /app/run_metabase.sh
# make our webserver port available
EXPOSE 3000
# run it
ENTRYPOINT ["/app/run_metabase.sh"]
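A hypothetical local smoke test of this image (the tag name is illustrative; metabase-standalone.jar and run_metabase.sh must already be in the build context, and without the RDS_* variables the MB_DB_* exports in run_metabase.sh below are simply empty):

    docker build -t metabase-standalone .
    docker run -p 3000:3000 metabase-standalone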
{
"AWSEBDockerrunVersion": "1",
"Logging": "/var/log/metabase"
}
#!/bin/bash
# Metabase Web Container
export MB_JETTY_HOST=$HOSTNAME
# NOTE: we set MB_JETTY_PORT in our Dockerfile in order to ensure we bind to the port exposed by Docker
# Metabase Database Info
# TODO: we could make this generic by first checking if the $RDS_* env variables are available and if
# so then apply the code below and map them to our Metabase env variables
export MB_DB_DBNAME=$RDS_DB_NAME
export MB_DB_USER=$RDS_USERNAME
export MB_DB_PASS=$RDS_PASSWORD
export MB_DB_HOST=$RDS_HOSTNAME
export MB_DB_PORT=$RDS_PORT
# TODO: dynamically determine type, probably using the port number
export MB_DB_TYPE=postgres
java -Dlogfile.path=target/log -XX:+CMSClassUnloadingEnabled -XX:+UseConcMarkSweepGC -jar /app/metabase-standalone.jar
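A minimal sketch of the port-based TODO above, assuming only Postgres and MySQL are in play (the mapping is an assumption, not existing behavior):

    # hypothetical: infer the engine from the standard RDS port
    case "$RDS_PORT" in
      3306) export MB_DB_TYPE=mysql ;;
      *)    export MB_DB_TYPE=postgres ;;  # 5432 and anything else
    esac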
#!/bin/bash
# for reference
# CIRCLE_SHA1=a3262e9b60a25e6a8a7faa29478b2b455b5ec4a3
# CIRCLE_BRANCH=master
if [ $# -ne 2 ]; then
echo "usage: $0 stackid appid"
exit 1
fi
STACKID=$1
APPID=$2
echo "deploying $CIRCLE_SHA1 from $CIRCLE_BRANCH ..."
aws opsworks create-deployment --stack-id $STACKID --app-id $APPID --comment "deploying $CIRCLE_SHA1 from $CIRCLE_BRANCH" --command='{"Name": "deploy"}'
#!/bin/bash
set -eo pipefail
BASEDIR=$(dirname $0)
source "$BASEDIR/functions"
if [ -z "$1" ]; then
echo "Oops! You need to specify the name of the EB app version to deploy."
exit 1
fi
EB_VERSION_LABEL=$1
EB_ENVIRONMENT=metabase-proto
# deploy EB version to environment
deploy_version ${EB_ENVIRONMENT} ${EB_VERSION_LABEL}
#!/bin/bash
set -eo pipefail
BASEDIR=$(dirname $0)
source "$BASEDIR/functions"
EB_ENVIRONMENT=metabase-staging
# deploy EB version to environment
deploy_version ${EB_ENVIRONMENT}
#!/bin/bash
set -eo pipefail
BASEDIR=$(dirname $0)
source "$BASEDIR/functions"
# deploy EB version to environment
deploy_version "$1" "$2"
#!/bin/bash
set -eo pipefail
[[ -f /root/bin/aws.sh ]] && source /root/bin/aws.sh
BASEDIR=$(dirname $0)
PROJECT_ROOT=$(cd ${BASEDIR}/..; pwd)
ARTIFACTS_DIR="$PROJECT_ROOT/deploy/artifacts"
ARTIFACTS_S3BUCKET=${S3BUCKET:=metabase-artifacts}
BRANCH=$(cd ${PROJECT_ROOT}; $(which git) rev-parse --abbrev-ref HEAD)
# OpsWorks creates a deploy branch. We'll use master in this case
[[ "$BRANCH" == "deploy" ]] && BRANCH="master"
COMMITISH=$(cd ${PROJECT_ROOT}; $(which git) rev-parse --short HEAD)
DATE=$(date +%Y-%m-%d)
DEFAULT_RELEASE_ZIP_FILE_NAME="metabase-$BRANCH-$DATE-$COMMITISH.zip"
export LANG=en_US.UTF-8
export LANGUAGE=$LANG
export LC_ALL=$LANG
build_uberjar() {
[[ "$USER" == "root" ]] && export LEIN_ROOT=true
$(which locale) | $(which sort) || true
echo "building uberjar"
${PROJECT_ROOT}/build-uberjar
}
upload_release_artifacts() {
$(which locale) | $(which sort) || true
echo "uploading $ARTIFACTS_DIR/*.jar -> $ARTIFACTS_S3BUCKET/jar/"
aws s3 cp $ARTIFACTS_DIR/ s3://$ARTIFACTS_S3BUCKET/jar/ --recursive --exclude "*" --include "*.jar"
echo "uploading $ARTIFACTS_DIR/*.zip -> $ARTIFACTS_S3BUCKET/eb/"
aws s3 cp $ARTIFACTS_DIR/ s3://$ARTIFACTS_S3BUCKET/eb/ --recursive --exclude "*" --include "*.zip"
}
mk_release_artifacts() {
METABASE_JAR_NAME="metabase-standalone.jar"
RELEASE_TYPE="aws-eb-docker"
RELEASE_JAR_FILE_NAME=${METABASE_JAR_NAME%-standalone.jar}-$BRANCH-$DATE-$COMMITISH.jar
RELEASE_ZIP_FILE_NAME="$1"
UBERJAR_DIR="${PROJECT_ROOT}/target/uberjar"
if [[ -z $RELEASE_ZIP_FILE_NAME ]]; then
RELEASE_ZIP_FILE_NAME=$DEFAULT_RELEASE_ZIP_FILE_NAME
echo "release name not provided defaulting to $RELEASE_ZIP_FILE_NAME"
fi
RELEASE_FILES="${PROJECT_ROOT}/deploy/${RELEASE_TYPE}"
RELEASE_FILE="${PROJECT_ROOT}/${RELEASE_ZIP_FILE_NAME}"
# package up the release files
cd $RELEASE_FILES; zip -r $RELEASE_FILE * .ebextensions
# add the built uberjar
cd $UBERJAR_DIR; cp metabase-*-SNAPSHOT-*.jar $METABASE_JAR_NAME ; zip $RELEASE_FILE $METABASE_JAR_NAME
mkdir -p $ARTIFACTS_DIR
rm -f $ARTIFACTS_DIR/*
mv -f $RELEASE_FILE $ARTIFACTS_DIR/
mv -f $UBERJAR_DIR/$METABASE_JAR_NAME $ARTIFACTS_DIR/$RELEASE_JAR_FILE_NAME
upload_release_artifacts
}
create_eb_version() {
EB_APPLICATION=Metabase
EB_VERSION_LABEL=$1
S3_KEY=$2
$(which locale) | $(which sort) || true
[[ -z "$EB_VERSION_LABEL" ]] && EB_VERSION_LABEL="$BRANCH-$DATE-$COMMITISH"
[[ -z "$S3_KEY" ]] && S3_KEY=$DEFAULT_RELEASE_ZIP_FILE_NAME
echo "Creating app version in EB"
aws elasticbeanstalk create-application-version --no-auto-create-application --region us-east-1 --application-name ${EB_APPLICATION} --version-label ${EB_VERSION_LABEL} --source-bundle S3Bucket="${ARTIFACTS_S3BUCKET}",S3Key="eb/${S3_KEY}"
}
deploy_version() {
EB_ENVIRONMENT=$1
EB_VERSION_LABEL=$2
$(which locale) | $(which sort) || true
[[ -z "$EB_ENVIRONMENT" ]] && EB_VERSION_LABEL="metabase-staging" && echo ""
[[ -z "$EB_VERSION_LABEL" ]] && EB_VERSION_LABEL="$BRANCH-$DATE-$COMMITISH"
aws elasticbeanstalk update-environment --region us-east-1 --environment-name ${EB_ENVIRONMENT} --version-label ${EB_VERSION_LABEL}
}
#!/bin/bash
set -eo pipefail
BASEDIR=$(dirname $0)
source "$BASEDIR/functions"
build_uberjar
mk_release_artifacts "$1"
#!/bin/bash
set -eo pipefail
BASEDIR=$(dirname $0)
source "$BASEDIR/functions"
create_eb_version "$1" "$2"
# About the information we collect:
Metabase uses Google Analytics to collect anonymous usage information from the installed servers that enable this feature. Below are the events we have instrumented, as well as the information we collect about the user performing the action and the instance being used.
While this list of anonymous information might seem long, it’s useful to compare it to the alternatives. With a typical SaaS platform, not only will this information be collected, but it will also be accompanied by information about your data: how often it is accessed, the specific queries you run, and specific record counts, all tied to your company and current plan.
We collect this information to improve your experience and the quality of Metabase, and in the list below, we spell out exactly why we collect each bit of information.
If you prefer not to provide us with this anonymous usage data, please go to your instance’s admin section and set the “collect-anonymous-usage-metrics” value to False.
### Example questions we want to answer:
* Is our query interface working?
* Are users stopping halfway through a question?
* Are users using filters?
* Are users using groupings?
* How often are users using bare rows vs other aggregation options?
* Are people clicking on column headings to sort, or manually adding a sort clause?
* How often are users writing SQL instead of using the query interface?
* Are these queries written by a select group of analysts, or is the entire company SQL literate?
* Are people using dashboards as a starting point for queries?
* How many clicks are there on dashboard cards?
* How many of these clicks result in modified queries that are executed?
* How often are questions saved?
* How often are saved questions added to dashboards?
### What we will do with the answers to these questions:
* Prioritize improvements in the query interface vs the SQL interface.
* Optimize the product for the usage patterns we actually observe.
* Stay on top of browser incompatibilities.
* Optimize our dashboards for either passive consumption or as a starting point for further exploration, depending on how they are being used.
While we closely follow reported issues and feature requests, we aim to make as many of our users as possible happy by improving the features that matter to them. Allowing us to collect information about your instance gives your users a direct vote in future improvements.
# The data we collect:
### Events
| Category | Action | Why we collect this |
|---------|--------|--------------------|
| Card Query Builder | Card added to dashboard | To understand how often users add cards to dashboards. If we find that people mainly add cards to dashboards rather than keeping them free-standing, we will prioritize dashboard features over ad hoc questioning. |
| Card Query Builder | filter added | Are users actively filtering in queries or using materialized views? |
| Card Query Builder | aggregation added | Are users mainly looking at rows and segments of tables, or running aggregate metrics? If the former, we intend to improve the power of our segmentation features. |
| Card Query Builder | group by added | How often do users slice and dice information by dimensions? Is this intuitive? Are users trying and failing to do this? |
| Card Query Builder | sort added | How often do users manually sort vs use the sort icon on the columns? |
| Card Query Builder | sort icon clicked | How often do users manually sort vs use the sort icon on the columns? |
| Card Query Builder | limit applied | How often do users manually limit the results that come back? |
| Card Query Builder | query ran | Looking for mismatches between people adding sorts, limits, etc. and actually running the query. Are there discrepancies between these numbers and the rest of the query clause events? Are there browsers or languages where these numbers are out of whack? |
| Card Query Builder | saved | How often are users saving a question for later vs running quick ad hoc questions? |
| SQL Query | started | How often do users need to revert to SQL? If this is very high, it’s an indication that our query language is not expressive enough or the query builder not easy enough to use. We watch this number to understand how to improve our query language. |
| SQL Query | run | How often are SQL queries started but not run? This is used as an alerting condition for bugs or issues with the SQL interface. |
| SQL Query | saved | How often are people saving SQL-backed questions? |
| SQL Query | Card added to dashboard | This helps us understand whether our query language is expressive enough for ad hoc queries, whether it is also expressive enough for canonical dashboards, or if it doesn’t go far enough in one or both of those cases. |
| Dashboard | Rearrange Started | How often do users wish to rearrange their dashboards? |
| Dashboard | Rearrange Finished | How often do users commit their changes to dashboard layout? If this number is much lower than rearrange starts, there might be a bug or UX issue. |
| Dashboard | Card Clicked | How often are dashboard cards used as a starting point for further exploration? |
### Visitor Custom Variables
| Name | Value | Why we collect this |
| ---- | ----- | ------------------- |
| User newb score | # SQL queries written | To understand if “bad” conditions (such as users starting queries but not executing them) are due to novice users |
| User newb score | # non-SQL queries written | To understand if “bad” conditions (such as users starting queries but not executing them) are due to novice users |
| User newb score | total queries written | To understand if “bad” conditions (such as users starting queries but not executing them) are due to novice users |
| Instance newb score | # databases | To understand how many databases are typically used by our users. If we see that a typical installation has many databases, we will prioritize support for multiple databases |
| Instance newb score | # admins | To know whether we should provide tools that allow a group of admins to collaboratively annotate data and manage connections, or whether we can ignore these features |
| Instance newb score | # active users | To understand if problems or active usage correlate with how many users are using the instance. Are there issues that compound as more users are asking questions and creating dashboards? |
| Instance newb score | # dashboards | How many dashboards do our instances typically have? Should we optimize our interface for companies with a few core dashboards or many special-use dashboards? |
| Instance newb score | # saved SQL questions | To understand how much our user base depends on raw SQL questions vs using our query language. This determines how much emphasis we place on tools for writing SQL vs improving the query language |
| Instance newb score | # saved non-SQL questions | To understand how much our user base depends on raw SQL questions vs using our query language. This determines how much emphasis we place on tools for writing SQL vs improving the query language |
| Instance newb score | % tables annotated | To understand if certain “bad” conditions (such as users starting queries but not executing them) are linked to whether there is enough metadata added by the instance administrators |
| Instance newb score | % fields annotated | To understand if certain “bad” conditions (such as users starting queries but not executing them) are linked to whether there is enough metadata added by the instance administrators |
@@ -36,7 +36,7 @@ Then run the HTTP server with
Check that the project can compile successfully with
-lein uberjar
+./build-uberjar
Run the linters with
(ns leiningen.npm
  (:require [clojure.java.shell :refer [sh]]))

(defn npm [projects & args]
  ;; TODO - some better validations such as checking if `npm` is available
  (println "Running `npm install` to download javascript dependencies")
  (let [result (sh "npm" "install")]
    (if (zero? (:exit result))
      (println (:out result))
      (println (:err result)))))
\ No newline at end of file
(ns leiningen.webpack
  (:require [clojure.java.shell :refer [sh]]))

;; Set the CI_DISABLE_WEBPACK_MINIFICATION environment variable to skip minification,
;; which takes ~6+ minutes on CircleCI
(defn webpack [projects & args]
  ;; TODO - some better validations such as checking that we have webpack available
  (println "Running `webpack -p` to assemble and minify frontend assets")
  (let [webpack-js (str (:root projects) "/node_modules/webpack/bin/webpack.js")
        ;; only pass -p (minify) when minification hasn't been disabled; passing an
        ;; empty-string argument to webpack, as the original code did, is a bug
        result     (if (System/getenv "CI_DISABLE_WEBPACK_MINIFICATION")
                     (sh webpack-js)
                     (sh webpack-js "-p"))]
    (if (zero? (:exit result))
      (println (:out result))
      (println (:err result)))))
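Because these namespaces follow Leiningen's leiningen.<task> convention, each defn becomes a task once the directory is on Leiningen's classpath (e.g. via .lein-classpath or a plugin; how this project wires that up is not shown in this diff):

    lein npm       # runs `npm install`
    lein webpack   # runs webpack, adding -p unless CI_DISABLE_WEBPACK_MINIFICATION is set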