$ docker version
$ docker search repo
$ docker pull username/repo
$ docker run learn/tutorial echo "hello world"
$ docker run learn/tutorial apt-get install -y ping
# shows information about running containers
$ docker ps
# shows information about running and stopped containers
$ docker ps -a
# return the details of the last container started
$ docker ps -l
# create a new container
$ docker run IMAGE_ID CMD PARAMS
# tells Docker to run the container in the background.
$ docker run -d IMAGE_ID
# tell Docker to map any ports exposed in our image to our host
$ docker run -P IMAGE_ID
$ docker run -p 5000:5000 IMAGE_ID
$ docker inspect IMAGE_ID
$ docker run -i -t IMAGE_ID /bin/bash
# Create a new image from a container's changes
# The -m flag allows us to specify a commit message, much like you would with a commit on a version control system.
# The -a flag allows us to specify an author for our update
$ docker commit -m="Commit message" -a="Author Name" CONTAINER_ID IMAGE_NAME
# Examine the processes running inside the container
$ docker top CONTAINER_ID
# restart the old container again
$ docker start CONTAINER_ID
$ docker stop CONTAINER_ID
# Attach to a running container
$ docker attach CONTAINER_ID
# execute a command in a running container and keep stdin interactive
$ docker exec -it  CONTAINER_ID  /bin/bash
# Build an image from a Dockerfile
# Add a tag to an existing image after you commit or build it.
# Remove image from Docker host
$ docker rmi IMAGE_ID
$ docker inspect CONTAINER_ID | grep IPAddress | cut -d '"' -f 4
# Narrow down the information we want to return by requesting a specific element
$ docker inspect -f '{{ .NetworkSettings.IPAddress }}' CONTAINER_ID
$ docker logs CONTAINER_ID
# This causes the docker logs command to act like the tail -f command and watch the container's standard out.
$ docker logs -f CONTAINER_ID
# Adding a data volume
$ docker run -i -t -v /HOST/DIRECTORY IMAGE_ID CMD
# Mount a host directory as a data volume using the -v flag
# Docker defaults to a read-write volume but we can also mount a directory read-only.
# (note: 'docker images --tree' was removed from modern Docker; use 'docker history IMAGE_ID' instead)
$ docker images --tree
# Remove all Exited Docker containers
$ docker ps -a | grep Exited | cut -d ' ' -f 1 | xargs docker rm
$ docker ps -a | grep Exited | awk '{print $1}'| xargs docker rm
$ docker rm $(docker ps -a -q)
# remove <none> images
$ docker images | grep none | awk '{print $3}'| xargs docker rmi
# remove all images
$ docker rmi $(docker images -q)
# remove container after running
$ docker run  --rm -i -t  IMAGE_ID CMD


An image can’t have more than 127 layers regardless of the storage driver.

This limitation is set globally to encourage optimization of the overall size of images.

Create base kali image

    # Install dependencies (debootstrap)
apt-get install debootstrap

    # Fetch the latest Kali debootstrap script from git
curl "http://git.kali.org/gitweb/?p=live-build-config.git;a=blob_plain;f=scripts/kali;hb=HEAD" > kali-debootstrap

    # Download kali packages
debootstrap kali ./kali-root ./kali-debootstrap

    # Create image
tar -C kali-root -c . | docker import - kali_base_1.0.9

    # Run image
docker run -t -i kali_base_1.0.9 /bin/bash

Install docker on Debian

$ sudo apt-get purge lxc-docker*
$ sudo apt-get autoremove
$ sudo apt-get update
$ sudo apt-get install apt-transport-https ca-certificates
$ sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
$ sudo vim /etc/apt/sources.list.d/docker.list
    # On Debian Stretch/Sid
    deb https://apt.dockerproject.org/repo debian-stretch main
$ sudo apt-get update
$ sudo apt-cache policy docker-engine
$ sudo apt-get install docker-engine
$ sudo service docker start

Install docker on Ubuntu Server

$ sudo apt-get update
$ sudo apt-get install apt-transport-https ca-certificates curl software-properties-common
$ curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
$ sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
$ sudo apt-get update
$ sudo apt-get install docker-ce

Set HTTP Proxy for docker

# systemctl status docker | grep Loaded
        Loaded: loaded (/lib/systemd/system/docker.service; enabled; vendor preset: enabled)
$ vim  /lib/systemd/system/docker.service

Add Environment to docker.service:

Environment="HTTP_PROXY=http://proxy.example.com:80/" "NO_PROXY=localhost,127.0.0.1"
$ sudo systemctl show docker --property Environment
$ sudo systemctl daemon-reload
$ sudo systemctl show docker --property Environment
$ sudo systemctl restart docker

Set HTTP Proxy for docker on Ubuntu 12.04.3 LTS

$ sudo vim /etc/default/docker
    export http_proxy="http://PROXY_IP:PROXY_PORT"
$ sudo service docker restart

how to let docker container work with sshuttle?

we need -l so that docker containers with “remote ip” can connect to the tunnel.

$ sshuttle -l 0.0.0.0 -vvr <USER>@<IP> 0/0

How can I use docker without sudo?

$ sudo groupadd docker
$ sudo usermod -a -G docker ${USER}
$ sudo service docker restart
# To prevent log out and log back in again,
# to pick up the new docker group permissions on the current bash session
$ newgrp docker

Install Docker Compose

$ sudo su
$ curl -L https://github.com/docker/compose/releases/download/1.9.0/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
$ sudo chmod +x /usr/local/bin/docker-compose
$ exit
$ docker-compose --version
# docker-compose version 1.9.0, build 2585387

Dockerfile reference:

Docker Compose


Install docker machine

$ apt-get install virtualbox
$ sudo curl -L https://github.com/docker/machine/releases/download/v0.6.0-rc4/docker-machine-`uname -s`-`uname -m` >/usr/local/bin/docker-machine && chmod +x /usr/local/bin/docker-machine
$ docker-machine -v
# docker-machine version 0.6.0-rc4, build a71048c

How to use docker machine

Docker Machine allows you to provision Docker on virtual machines that reside either on your local system or on a cloud provider.

Docker Machine creates a host on a VM and you use the Docker Engine client as needed to build images and create containers on the host.

You all might have had that moment like “ Ahh man! I have to execute all these commands again!!”.

And if you are that guy who hates to configure a docker host again and again, docker-machine is there for the rescue.

So, you can leave all the installation and configuration tasks of docker to docker-machine.

Docker machine lets you spin up docker host VMs locally on your laptop,

a cloud-provider (AWS, Azure etc) and your private data center (OpenStack, Vsphere etc).

Not only docker host provisioning — using docker machine you can also deploy and manage containers on individual hosts.

First, ensure that the latest VirtualBox is correctly installed on your system.

$ docker-machine ls
$ docker-machine create --driver virtualbox <machine-name>
$ docker-machine create --driver virtualbox default
#(default) Boot2Docker v1.9.1 has a known issue with AUFS.
#(default) See here for more details:
#(default) Consider specifying another storage driver (e.g. 'overlay') using '--engine-storage-driver' instead.
$ docker-machine create --engine-storage-driver overlay --driver virtualbox default
$ docker-machine env <machine-name>
# export DOCKER_TLS_VERIFY="1"
# export DOCKER_HOST="tcp://"
# export DOCKER_CERT_PATH="/home/or/.docker/machine/machines/default"
# export DOCKER_MACHINE_NAME="default"
# # Run this command to configure your shell:
# # eval $(docker-machine env default)
$ eval $(docker-machine env default)
$ docker ps
$ docker images
$ docker-machine stop <machine-name>
$ docker-machine restart <machine-name>
$ docker-machine start <machine-name>
$ docker history IMAGE_ID

Docker toolbox


Docker misconceptions

Service orchestration and management tool

Service discovery

Docker on multi host

An overlay network

Docker’s overlay network driver supports multi-host networking natively out-of-the-box. This support is accomplished with the help of libnetwork, a built-in VXLAN-based overlay network driver, and Docker’s libkv library.

Docker Engine supports multi-host networking out-of-the-box through the overlay network driver. Unlike bridge networks, overlay networks require some pre-existing conditions before you can create one. These conditions are:

Access to a key-value store. Docker supports Consul, Etcd, and ZooKeeper (Distributed store) key-value stores.

A cluster of hosts with connectivity to the key-value store.

A properly configured Engine daemon on each host in the cluster.

Hosts within the cluster must have unique hostnames because the key-value store uses the hostnames to identify cluster members.

docker machine

How to run a command on an already existing docker container?

if the container is stopped and can’t be started due to an error, you’ll need to commit it. Then you can launch bash in an image:

$ docker commit CONTAINER_ID temporary_image
$ docker run -it temporary_image /bin/bash

Removing Docker data volumes?

$ du -h --max-depth=1 /var/lib/docker  | sort -hr
$ docker volume rm $(docker volume ls -qf dangling=true)

Clear log history

$ vim clear-docker-logs.sh


# Remove the JSON log file of every container whose name matches $name.
# Note: requires root, since log files live under /var/lib/docker/containers.
for container_id in $(docker ps -a --filter="name=$name" -q); do
    # Ask Docker for the log path directly instead of grep/sed-ing raw
    # inspect JSON (same -f '{{ ... }}' idiom used elsewhere in these notes).
    file=$(docker inspect -f '{{.LogPath}}' "$container_id")

    if [ -f "$file" ]; then
        rm -f -- "$file"
    fi
done

$ chmod +x clear-docker-logs.sh
$ sudo ./clear-docker-logs.sh

Set maximum concurrent download for docker pull

$ sudo vim  /lib/systemd/system/docker.service

ExecStart=/usr/bin/dockerd -H fd:// --max-concurrent-downloads 1

$ sudo systemctl daemon-reload
$ systemctl restart docker

Override the ENTRYPOINT using docker run

docker run -it  --entrypoint "/bin/bash"  --rm -v "$PWD":/ws/omr/  lsakalauskas/sdaps

Set image name when building a custom image

$ docker build -t image_name .

Set environment variables during the build in docker

FROM ubuntu:18.04
RUN apt-get update
ARG DEBIAN_FRONTEND=noninteractive

The ARG is for setting environment variables which are used during the docker build process, and they are not present in the final image

Remove unused, <none>, untagged docker images

$ docker image prune -f

Disable auto-restart on a container

$ docker update --restart=no container-id

Minimal base docker OS images

$ docker pull python:3.6-alpine
$ docker images | grep -i python
#    python          3.6-alpine          cb04a359db13        3 days ago          74.3MB

$ docker pull gcr.io/distroless/python3
$ docker images | grep -i distroless
#   latest              523f07cec1e2        49 years ago        50.9MB

Note that a distroless image contains no shell, so `docker exec -it ...` (or `docker run -it ... /bin/sh`) will not work.