Compare commits

..

1 Commits

Author SHA1 Message Date
Ivan Mikushin
ceeb4ecfbd v0.4.3 2016-02-15 14:32:36 -08:00
2428 changed files with 135525 additions and 263452 deletions

View File

@@ -1,17 +1,16 @@
.DS_Store
.git
.idea
.trash-cache
.vendor
.dockerignore
bin
gopath
tmp
state
build
images/*/build
scripts/images/*/dist/
dist
Godeps/_workspace/pkg
tests/integration/.venv*
tests/integration/.tox
*/*/*/*.pyc
*/*/*/__pycache__
.trash-cache
.dapper
vendor/*/*/*/.git
tmp

15
.dockerignore.docker Normal file
View File

@@ -0,0 +1,15 @@
.DS_Store
.git
.idea
.vendor
.dockerignore
bin
gopath
tmp
state
build
Godeps/_workspace/pkg
tests/integration/.venv*
tests/integration/.tox
*/*/*/*.pyc
*/*/*/__pycache__

View File

@@ -1,8 +1,6 @@
---
pipeline:
build:
image: rancher/dapper:1.10.3
volumes:
- /var/run/docker.sock:/var/run/docker.sock
commands:
- dapper ci
build:
image: rancher/dapper:1.9.1
volumes:
- /var/run/docker.sock:/var/run/docker.sock
commands:
- dapper -k -O make DEV_BUILD=1 test

View File

@@ -1,5 +0,0 @@
**RancherOS Version: (ros os version)**
**Where are you running RancherOS? (docker-machine, AWS, GCE, baremetal, etc.)**

6
.gitignore vendored
View File

@@ -5,14 +5,10 @@
/build
/dist
/gopath
/images/*/build
.dockerfile
*.swp
/tests/integration/MANIFEST
/tests/integration/.venv*
/tests/integration/.tox
/tests/integration/.idea
*.pyc
__pycache__
/.dapper
/.trash-cache
__pycache__

1
.package Normal file
View File

@@ -0,0 +1 @@
github.com/rancher/os

1
.wrap-docker-args Normal file
View File

@@ -0,0 +1 @@
--privileged

11
Dockerfile Normal file
View File

@@ -0,0 +1,11 @@
# Installer image: packages the built RancherOS kernel/initrd together with
# the disk-install tooling (grub2 + parted) so /scripts/lay-down-os can
# write the OS onto a target disk.
FROM debian:jessie
ENV DEBIAN_FRONTEND noninteractive
# Combine update+install in one layer (avoids stale apt cache), skip
# recommended packages, and purge the package lists in the same layer so
# they never bloat the image.
RUN apt-get update && \
    apt-get install -y --no-install-recommends grub2 parted && \
    rm -rf /var/lib/apt/lists/*
COPY ./scripts/installer /scripts
COPY ./build.conf /scripts/
COPY ./dist/artifacts/vmlinuz /dist/vmlinuz
COPY ./dist/artifacts/initrd /dist/initrd
ENTRYPOINT ["/scripts/lay-down-os"]

View File

@@ -1,153 +1,57 @@
FROM ubuntu:16.04
# FROM arm64=aarch64/ubuntu:16.04 arm=armhf/ubuntu:16.04
FROM ubuntu:15.10
RUN apt-get update && \
apt-get -y install locales sudo vim less curl wget git rsync build-essential syslinux isolinux xorriso \
libblkid-dev libmount-dev libselinux1-dev cpio genisoimage qemu-kvm python-pip ca-certificates
RUN locale-gen en_US.UTF-8
ENV LANG en_US.UTF-8
ENV PATH $PATH:/usr/local/go/bin
RUN mkdir -p /go/src /go/bin && chmod -R 777 /go
ENV GOPATH /go
ENV PATH /go/bin:$PATH
ENV GO15VENDOREXPERIMENT 1
RUN pip install tox
RUN curl -sSL https://storage.googleapis.com/golang/go1.5.3.linux-amd64.tar.gz | tar -xz -C /usr/local
RUN curl -sL https://get.docker.com/builds/Linux/x86_64/docker-1.9.1 > /usr/local/bin/docker
RUN chmod +x /usr/local/bin/docker
ENV DAPPER_ENV VERSION DEV_BUILD
ENV DAPPER_DOCKER_SOCKET true
ENV DAPPER_SOURCE /go/src/github.com/rancher/os
ENV DAPPER_OUTPUT ./bin ./dist ./build/initrd
ENV DAPPER_OUTPUT ./bin ./dist
ENV DAPPER_RUN_ARGS --privileged
ENV TRASH_CACHE ${DAPPER_SOURCE}/.trash-cache
ENV SHELL /bin/bash
WORKDIR ${DAPPER_SOURCE}
RUN apt-get update && \
apt-get install -y --no-install-recommends \
build-essential \
ca-certificates \
cpio \
curl \
dosfstools \
gccgo \
genisoimage \
git \
isolinux \
less \
libblkid-dev \
libmount-dev \
libselinux1-dev \
locales \
module-init-tools \
openssh-client \
pkg-config \
qemu \
qemu-kvm \
rsync \
sudo \
syslinux-common \
vim \
wget \
xorriso
COPY .dockerignore.docker .dockerignore
########## General Configuration #####################
ARG DAPPER_HOST_ARCH=amd64
ARG HOST_ARCH=${DAPPER_HOST_ARCH}
ARG ARCH=${HOST_ARCH}
RUN cd /usr/local/src && \
for i in libselinux pcre3 util-linux; do \
apt-get build-dep -y $i && \
apt-get source -y $i \
;done
ARG OS_REPO=rancher
ARG HOSTNAME_DEFAULT=rancher
ARG DISTRIB_ID=RancherOS
RUN apt-get install -y gcc-arm-linux-gnueabihf g++-arm-linux-gnueabihf
RUN cd /usr/local/src/pcre3-* && \
autoreconf && \
CC=arm-linux-gnueabihf-gcc CXX=arm-linux-gnueabihf-g++ ./configure --host=arm-linux-gnueabihf --prefix=/usr/arm-linux-gnueabihf && \
make -j$(nproc) && \
make install
ARG DOCKER_VERSION=1.11.2
ARG DOCKER_PATCH_VERSION=v${DOCKER_VERSION}-ros1
ARG DOCKER_BUILD_VERSION=1.10.3
ARG DOCKER_BUILD_PATCH_VERSION=v${DOCKER_BUILD_VERSION}-ros1
ARG SELINUX_POLICY_URL=https://github.com/rancher/refpolicy/releases/download/v0.0.3/policy.29
RUN cd /usr/local/src/libselinux-* && \
CC=arm-linux-gnueabihf-gcc CXX=arm-linux-gnueabihf-g++ make CFLAGS=-Wall && \
make PREFIX=/usr/arm-linux-gnueabihf DESTDIR=/usr/arm-linux-gnueabihf install
ARG KERNEL_URL_amd64=https://github.com/rancher/os-kernel/releases/download/Ubuntu-4.4.0-47.68-rancher1/linux-4.4.24-rancher-x86.tar.gz
ARG KERNEL_URL_arm64=https://github.com/imikushin/os-kernel/releases/download/Estuary-4.4.0-arm64.8/linux-4.4.0-rancher-arm64.tar.gz
RUN cd /usr/local/src/util-linux-* && \
autoreconf && \
CC=arm-linux-gnueabihf-gcc CXX=arm-linux-gnueabihf-g++ ./configure --host=arm-linux-gnueabihf --prefix=/usr/arm-linux-gnueabihf \
--disable-all-programs \
--enable-libmount \
--enable-libblkid \
--enable-libuuid \
--enable-mount && \
make -j$(nproc) && \
make install
ARG DOCKER_URL_amd64=https://get.docker.com/builds/Linux/x86_64/docker-${DOCKER_VERSION}.tgz
ARG DOCKER_URL_arm=https://github.com/rancher/docker/releases/download/${DOCKER_PATCH_VERSION}/docker-${DOCKER_VERSION}_arm.tgz
ARG DOCKER_URL_arm64=https://github.com/rancher/docker/releases/download/${DOCKER_PATCH_VERSION}/docker-${DOCKER_VERSION}_arm64.tgz
ARG BUILD_DOCKER_URL_amd64=https://get.docker.com/builds/Linux/x86_64/docker-${DOCKER_BUILD_VERSION}
ARG BUILD_DOCKER_URL_arm=https://github.com/rancher/docker/releases/download/${DOCKER_BUILD_PATCH_VERSION}/docker-${DOCKER_BUILD_VERSION}_arm
ARG BUILD_DOCKER_URL_arm64=https://github.com/rancher/docker/releases/download/${DOCKER_BUILD_PATCH_VERSION}/docker-${DOCKER_BUILD_VERSION}_arm64
ARG OS_RELEASES_YML=https://releases.rancher.com/os
ARG OS_SERVICES_REPO=https://raw.githubusercontent.com/${OS_REPO}/os-services
ARG IMAGE_NAME=${OS_REPO}/os
ARG DFS_IMAGE=${OS_REPO}/docker:v${DOCKER_VERSION}-2
ARG OS_BASE_URL_amd64=https://github.com/rancher/os-base/releases/download/v2016.08.1-1/os-base_amd64.tar.xz
ARG OS_BASE_URL_arm64=https://github.com/rancher/os-base/releases/download/v2016.08.1-1/os-base_arm64.tar.xz
ARG OS_BASE_URL_arm=https://github.com/rancher/os-base/releases/download/v2016.08.1-1/os-base_arm.tar.xz
######################################################
# Set up environment and export all ARGS as ENV
ENV ARCH=${ARCH} \
HOST_ARCH=${HOST_ARCH}
ENV BUILD_DOCKER_URL=BUILD_DOCKER_URL_${ARCH} \
BUILD_DOCKER_URL_amd64=${BUILD_DOCKER_URL_amd64} \
BUILD_DOCKER_URL_arm=${BUILD_DOCKER_URL_arm} \
BUILD_DOCKER_URL_arm64=${BUILD_DOCKER_URL_arm64} \
DAPPER_HOST_ARCH=${DAPPER_HOST_ARCH} \
DFS_IMAGE=${DFS_IMAGE} \
DISTRIB_ID=${DISTRIB_ID} \
DOCKER_PATCH_VERSION=${DOCKER_PATCH_VERSION} \
DOCKER_URL=DOCKER_URL_${ARCH} \
DOCKER_URL_amd64=${DOCKER_URL_amd64} \
DOCKER_URL_arm=${DOCKER_URL_arm} \
DOCKER_URL_arm64=${DOCKER_URL_arm64} \
DOCKER_VERSION=${DOCKER_VERSION} \
DOWNLOADS=/usr/src/downloads \
GOPATH=/go \
GO_VERSION=1.7.1 \
GOARCH=$ARCH \
HOSTNAME_DEFAULT=${HOSTNAME_DEFAULT} \
IMAGE_NAME=${IMAGE_NAME} \
KERNEL_URL=KERNEL_URL_${ARCH} \
KERNEL_URL_amd64=${KERNEL_URL_amd64} \
KERNEL_URL_arm64=${KERNEL_URL_arm64} \
OS_BASE_SHA1=OS_BASE_SHA1_${ARCH} \
OS_BASE_URL=OS_BASE_URL_${ARCH} \
OS_BASE_URL_amd64=${OS_BASE_URL_amd64} \
OS_BASE_URL_arm=${OS_BASE_URL_arm} \
OS_BASE_URL_arm64=${OS_BASE_URL_arm64} \
OS_RELEASES_YML=${OS_RELEASES_YML} \
OS_REPO=${OS_REPO} \
OS_SERVICES_REPO=${OS_SERVICES_REPO} \
REPO_VERSION=master \
SELINUX_POLICY_URL=${SELINUX_POLICY_URL}
ENV PATH=${GOPATH}/bin:/usr/local/go/bin:$PATH
RUN mkdir -p ${DOWNLOADS}
# Download kernel
RUN rm /bin/sh && ln -s /bin/bash /bin/sh
RUN if [ -n "${!KERNEL_URL}" ]; then \
curl -fL ${!KERNEL_URL} > ${DOWNLOADS}/kernel.tar.gz \
;fi
# Download SELinux Policy
RUN curl -pfL ${SELINUX_POLICY_URL} > ${DOWNLOADS}/$(basename ${SELINUX_POLICY_URL})
# Install Go
COPY assets/go-dnsclient.patch ${DAPPER_SOURCE}
RUN ln -sf go-6 /usr/bin/go && \
curl -sfL https://storage.googleapis.com/golang/go${GO_VERSION}.src.tar.gz | tar -xzf - -C /usr/local && \
patch /usr/local/go/src/net/dnsclient_unix.go ${DAPPER_SOURCE}/go-dnsclient.patch && \
cd /usr/local/go/src && \
GOROOT_BOOTSTRAP=/usr GOARCH=${HOST_ARCH} GOHOSTARCH=${HOST_ARCH} ./make.bash && \
rm /usr/bin/go
# Install Host Docker
RUN curl -fL ${!BUILD_DOCKER_URL} > /usr/bin/docker && \
chmod +x /usr/bin/docker
# Install Trash
RUN go get github.com/rancher/trash
RUN go get gopkg.in/check.v1
# Install dapper
RUN curl -sL https://releases.rancher.com/dapper/latest/dapper-`uname -s`-`uname -m | sed 's/arm.*/arm/'` > /usr/bin/dapper && \
chmod +x /usr/bin/dapper
RUN cd ${DOWNLOADS} && \
curl -pfL ${!OS_BASE_URL} | tar xvJf -
ENTRYPOINT ["./scripts/entry"]
CMD ["ci"]
CMD make all

103
Makefile
View File

@@ -1,39 +1,88 @@
TARGETS := $(shell ls scripts | grep -vE 'clean|run|help')
FORCE_PULL := 0
DEV_BUILD := 0
ARCH := amd64
.dapper:
@echo Downloading dapper
@curl -sL https://releases.rancher.com/dapper/latest/dapper-`uname -s`-`uname -m` > .dapper.tmp
	@chmod +x .dapper.tmp
@./.dapper.tmp -v
@mv .dapper.tmp .dapper
include build.conf
include build.conf.$(ARCH)
$(TARGETS): .dapper
./.dapper $@
trash: .dapper
./.dapper -m bind trash
bin/ros:
mkdir -p $(dir $@)
ARCH=$(ARCH) VERSION=$(VERSION) ./scripts/mk-ros.sh $@
trash-keep: .dapper
./.dapper -m bind trash -k
build/host_ros: bin/ros
mkdir -p $(dir $@)
ifeq "$(ARCH)" "amd64"
ln -sf ../bin/ros $@
else
ARCH=amd64 VERSION=$(VERSION) ./scripts/mk-ros.sh $@
endif
deps: trash
pwd := $(shell pwd)
include scripts/build-common
build/initrd/.id: .dapper
./.dapper prepare
run: build/initrd/.id .dapper
./.dapper -m bind build-target
./scripts/run
assets/docker:
mkdir -p $(dir $@)
curl -L "$(DOCKER_BINARY_URL)" > $@
chmod +x $@
shell-bind: .dapper
./.dapper -m bind -s
clean:
@./scripts/clean
ifdef COMPILED_KERNEL_URL
help:
@./scripts/help
installer: minimal
docker build -t $(IMAGE_NAME):$(VERSION) .
.DEFAULT_GOAL := default
$(DIST)/artifacts/vmlinuz: $(BUILD)/kernel/
mkdir -p $(dir $@)
mv $(BUILD)/kernel/boot/vmlinuz* $@
.PHONY: $(TARGETS)
$(BUILD)/kernel/:
mkdir -p $@
curl -L "$(COMPILED_KERNEL_URL)" | tar -xzf - -C $@
$(DIST)/artifacts/initrd: bin/ros assets/docker $(BUILD)/kernel/ $(BUILD)/images.tar
mkdir -p $(dir $@)
ARCH=$(ARCH) DFS_IMAGE=$(DFS_IMAGE) DEV_BUILD=$(DEV_BUILD) ./scripts/mk-initrd.sh $@
$(DIST)/artifacts/rancheros.iso: minimal
./scripts/mk-rancheros-iso.sh
all: minimal installer iso
initrd: $(DIST)/artifacts/initrd
minimal: initrd $(DIST)/artifacts/vmlinuz
iso: $(DIST)/artifacts/rancheros.iso $(DIST)/artifacts/iso-checksums.txt
test: minimal
cd tests/integration && tox
.PHONY: all minimal initrd iso installer test
endif
$(BUILD)/images.tar: build/host_ros
ARCH=$(ARCH) FORCE_PULL=$(FORCE_PULL) ./scripts/mk-images-tar.sh
$(DIST)/artifacts/rootfs.tar.gz: bin/ros assets/docker $(BUILD)/images.tar
mkdir -p $(dir $@)
ARCH=$(ARCH) DFS_IMAGE=$(DFS_IMAGE) DEV_BUILD=$(DEV_BUILD) IS_ROOTFS=1 ./scripts/mk-initrd.sh $@
$(DIST)/artifacts/iso-checksums.txt: $(DIST)/artifacts/rancheros.iso
./scripts/mk-iso-checksums-txt.sh
version:
@echo $(VERSION)
rootfs: $(DIST)/artifacts/rootfs.tar.gz
.PHONY: rootfs version bin/ros

View File

@@ -1,82 +1,87 @@
# RancherOS
The smallest, easiest way to run Docker in production at scale. Everything in RancherOS is a container managed by Docker. This includes system services such as udev and rsyslog. RancherOS includes only the bare minimum amount of software needed to run Docker. This keeps the binary download of RancherOS very small. Everything else can be pulled in dynamically through Docker.
The smallest, easiest way to run Docker in production at scale. Everything in RancherOS is a container managed by Docker. This includes system services such as udev and rsyslog. RancherOS includes only the bare minimum amount of software needed to run Docker. This keeps the binary download of RancherOS to about 25MB. Everything else can be pulled in dynamically through Docker.
## How this works
Everything in RancherOS is a Docker container. We accomplish this by launching two instances of
Docker. One is what we call the system Docker which runs as the first process. System Docker then launches
Docker. One is what we call the system Docker which runs as PID 1. System Docker then launches
a container that runs the user Docker. The user Docker is then the instance that gets primarily
used to create containers. We created this separation because it seemed logical and also
it would really be bad if somebody did `docker rm -f $(docker ps -qa)` and deleted the entire OS.
![How it works](docs/rancheros.png "How it works")
## Latest Release
**v0.7.1 - Docker 1.12.3 - Linux 4.4.24**
**v0.4.2 - Docker 1.9.1 - Linux 4.2**
### ISO
https://releases.rancher.com/os/latest/rancheros.iso
https://releases.rancher.com/os/v0.7.1/rancheros.iso
https://releases.rancher.com/os/v0.4.2/rancheros.iso
**Note**: you must login using `rancher` for username and password.
### Additional Downloads
#### Latest
* https://releases.rancher.com/os/latest/initrd
* https://releases.rancher.com/os/latest/iso-checksums.txt
* https://releases.rancher.com/os/latest/rancheros-openstack.img
* https://releases.rancher.com/os/latest/rancheros-raspberry-pi.zip
* https://releases.rancher.com/os/latest/rancheros-v0.7.1.tar.gz
* https://releases.rancher.com/os/latest/rancheros-v0.4.1.tar.gz
* https://releases.rancher.com/os/latest/rancheros.iso
* https://releases.rancher.com/os/latest/rootfs_arm.tar.gz
* https://releases.rancher.com/os/latest/rootfs_arm64.tar.gz
* https://releases.rancher.com/os/latest/rootfs.tar.gz
* https://releases.rancher.com/os/latest/vmlinuz
#### v0.7.1
* https://releases.rancher.com/os/v0.7.1/initrd
* https://releases.rancher.com/os/v0.7.1/iso-checksums.txt
* https://releases.rancher.com/os/v0.7.1/rancheros-openstack.img
* https://releases.rancher.com/os/v0.7.1/rancheros-raspberry-pi.zip
* https://releases.rancher.com/os/v0.7.1/rancheros-v0.7.1.tar.gz
* https://releases.rancher.com/os/v0.7.1/rancheros.iso
* https://releases.rancher.com/os/v0.7.1/rootfs_arm.tar.gz
* https://releases.rancher.com/os/v0.7.1/rootfs_arm64.tar.gz
* https://releases.rancher.com/os/v0.7.1/rootfs.tar.gz
* https://releases.rancher.com/os/v0.7.1/vmlinuz
* https://releases.rancher.com/os/v0.4.2/initrd
* https://releases.rancher.com/os/v0.4.2/iso-checksums.txt
* https://releases.rancher.com/os/v0.4.1/rancheros-v0.4.1.tar.gz
* https://releases.rancher.com/os/v0.4.2/rancheros.iso
* https://releases.rancher.com/os/v0.4.2/vmlinuz
**Note**: you can use `http` instead of `https` in the above URLs, e.g. for iPXE.
### Amazon
SSH keys are added to the **`rancher`** user, so you must log in using the **rancher** user.
We have 2 different [virtualization types of AMIs](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/virtualization_types.html). SSH keys are added to the **`rancher`** user, so you must log in using the **rancher** user.
**HVM**
Region | Type | AMI |
-------|------|------
ap-northeast-1 | HVM | [ami-75954214](https://console.aws.amazon.com/ec2/home?region=ap-northeast-1#launchInstanceWizard:ami=ami-75954214)
ap-northeast-2 | HVM | [ami-690dd807](https://console.aws.amazon.com/ec2/home?region=ap-northeast-2#launchInstanceWizard:ami=ami-690dd807)
ap-south-1 | HVM | [ami-ed8cf982](https://console.aws.amazon.com/ec2/home?region=ap-south-1#launchInstanceWizard:ami=ami-ed8cf982)
ap-southeast-1 | HVM | [ami-27bc6644](https://console.aws.amazon.com/ec2/home?region=ap-southeast-1#launchInstanceWizard:ami=ami-27bc6644)
ap-southeast-2 | HVM | [ami-67172604](https://console.aws.amazon.com/ec2/home?region=ap-southeast-2#launchInstanceWizard:ami=ami-67172604)
eu-central-1 | HVM | [ami-e88d7f87](https://console.aws.amazon.com/ec2/home?region=eu-central-1#launchInstanceWizard:ami=ami-e88d7f87)
eu-west-1 | HVM | [ami-934837e0](https://console.aws.amazon.com/ec2/home?region=eu-west-1#launchInstanceWizard:ami=ami-934837e0)
sa-east-1 | HVM | [ami-6949d905](https://console.aws.amazon.com/ec2/home?region=sa-east-1#launchInstanceWizard:ami=ami-6949d905)
us-east-1 | HVM | [ami-a8d2a4bf](https://console.aws.amazon.com/ec2/home?region=us-east-1#launchInstanceWizard:ami=ami-a8d2a4bf)
us-west-1 | HVM | [ami-fccb879c](https://console.aws.amazon.com/ec2/home?region=us-west-1#launchInstanceWizard:ami=ami-fccb879c)
us-west-2 | HVM | [ami-1ed3007e](https://console.aws.amazon.com/ec2/home?region=us-west-2#launchInstanceWizard:ami=ami-1ed3007e)
### Google Compute Engine
ap-northeast-1 | HVM | [ami-e4380c8a](https://console.aws.amazon.com/ec2/home?region=ap-northeast-1#launchInstanceWizard:ami=ami-e4380c8a)
ap-southeast-1 | HVM | [ami-4da5672e](https://console.aws.amazon.com/ec2/home?region=ap-southeast-1#launchInstanceWizard:ami=ami-4da5672e)
ap-southeast-2 | HVM | [ami-01a5fe62](https://console.aws.amazon.com/ec2/home?region=ap-southeast-2#launchInstanceWizard:ami=ami-01a5fe62)
eu-central-1 | HVM | [ami-a75b44cb](https://console.aws.amazon.com/ec2/home?region=eu-central-1#launchInstanceWizard:ami=ami-a75b44cb)
eu-west-1 | HVM | [ami-7989240a](https://console.aws.amazon.com/ec2/home?region=eu-west-1#launchInstanceWizard:ami=ami-7989240a)
sa-east-1 | HVM | [ami-1f4bcc73](https://console.aws.amazon.com/ec2/home?region=sa-east-1#launchInstanceWizard:ami=ami-1f4bcc73)
us-east-1 | HVM | [ami-53045239](https://console.aws.amazon.com/ec2/home?region=us-east-1#launchInstanceWizard:ami=ami-53045239)
us-west-1 | HVM | [ami-6d2d470d](https://console.aws.amazon.com/ec2/home?region=us-west-1#launchInstanceWizard:ami=ami-6d2d470d)
us-west-2 | HVM | [ami-8b0c12ea](https://console.aws.amazon.com/ec2/home?region=us-west-2#launchInstanceWizard:ami=ami-8b0c12ea)
We are providing a disk image that users can download and import for use in Google Compute Engine. The image can be obtained from the release artifacts for RancherOS.
**Paravirtual**
[Download Image](https://github.com/rancher/os/releases/download/v0.7.1/rancheros-v0.7.1.tar.gz)
Region | Type | AMI
---|--- | ---
ap-northeast-1 | PV | [ami-98380cf6](https://console.aws.amazon.com/ec2/home?region=ap-northeast-1#launchInstanceWizard:ami=ami-98380cf6)
ap-southeast-1 | PV | [ami-94aa68f7](https://console.aws.amazon.com/ec2/home?region=ap-southeast-1#launchInstanceWizard:ami=ami-94aa68f7)
ap-southeast-2 | PV | [ami-3ca5fe5f](https://console.aws.amazon.com/ec2/home?region=ap-southeast-2#launchInstanceWizard:ami=ami-3ca5fe5f)
eu-central-1 | PV | [ami-5e5a4532](https://console.aws.amazon.com/ec2/home?region=eu-central-1#launchInstanceWizard:ami=ami-5e5a4532)
eu-west-1 | PV | [ami-2e8e235d](https://console.aws.amazon.com/ec2/home?region=eu-west-1#launchInstanceWizard:ami=ami-2e8e235d)
sa-east-1 | PV | [ami-6249ce0e](https://console.aws.amazon.com/ec2/home?region=sa-east-1#launchInstanceWizard:ami=ami-6249ce0e)
us-east-1 | PV | [ami-850452ef](https://console.aws.amazon.com/ec2/home?region=us-east-1#launchInstanceWizard:ami=ami-850452ef)
us-west-1 | PV | [ami-30d6bd50](https://console.aws.amazon.com/ec2/home?region=us-west-1#launchInstanceWizard:ami=ami-30d6bd50)
us-west-2 | PV | [ami-550d1334](https://console.aws.amazon.com/ec2/home?region=us-west-2#launchInstanceWizard:ami=ami-550d1334)
Please follow the directions at our [docs to launch in GCE](http://docs.rancher.com/os/running-rancheros/cloud/gce/).
### Google Compute Engine (Experimental)
We are providing a disk image that users can download and import for use in Google Compute Engine. The image can be obtained from the release artifacts for RancherOS v0.3.0 or later.
[Download Image](https://github.com/rancher/os/releases/download/v0.4.1/rancheros-v0.4.1.tar.gz)
Please follow the directions at our [docs to launch in GCE](http://docs.rancher.com/os/running-rancheros/cloud/gce/).
#### Known issues/ToDos
* Add GCE daemon support. (Manages users)
## Documentation for RancherOS
@@ -103,3 +108,4 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1,12 +0,0 @@
{
"name": "bridge",
"type": "bridge",
"bridge": "docker-sys",
"isDefaultGateway": true,
"ipMasq": true,
"hairpinMode": true,
"ipam": {
"type": "host-local",
"subnet": "172.18.42.1/16"
}
}

View File

@@ -1 +0,0 @@
bridge.d/

View File

@@ -1,7 +0,0 @@
{
"path": "/usr/bin/ros",
"args": [
"cni-glue",
"poststop"
]
}

View File

@@ -1,6 +0,0 @@
{
"path": "/usr/bin/ros",
"args": [
"cni-glue"
]
}

View File

@@ -1,18 +0,0 @@
296a297,300
> conf.update(name)
> }
>
> func (conf *resolverConfig) update(name string) {
300a305,316
> }
>
> func UpdateDnsConf() {
> resolvConf.initOnce.Do(resolvConf.init)
>
> // Ensure only one update at a time checks resolv.conf.
> if !resolvConf.tryAcquireSema() {
> return
> }
> defer resolvConf.releaseSema()
>
> resolvConf.update("/etc/resolv.conf")

View File

@@ -1,2 +0,0 @@
SELINUX=permissive
SELINUXTYPE=ros

View File

@@ -1 +0,0 @@
unconfined_r:unconfined_t:s0

View File

@@ -1,3 +0,0 @@
process = "system_u:system_r:svirt_lxc_net_t:s0"
content = "system_u:object_r:virt_var_lib_t:s0"
file = "system_u:object_r:svirt_lxc_file_t:s0"

View File

@@ -1 +0,0 @@
__default__:unconfined_u:s0-s0:c0.c1023

3
build.conf Normal file
View File

@@ -0,0 +1,3 @@
IMAGE_NAME=rancher/os
VERSION=v0.4.3
DFS_IMAGE=rancher/docker:v1.10.1

2
build.conf.amd64 Normal file
View File

@@ -0,0 +1,2 @@
COMPILED_KERNEL_URL=https://github.com/rancher/os-kernel/releases/download/Ubuntu-4.2.0-28.33-rancher/linux-4.2.8-ckt3-rancher-x86.tar.gz
DOCKER_BINARY_URL=https://get.docker.com/builds/Linux/x86_64/docker-1.10.1

1
build.conf.arm Normal file
View File

@@ -0,0 +1 @@
DOCKER_BINARY_URL=https://github.com/rancher/docker/releases/download/v1.10.1-ros_arm/docker-1.10.1

17
build.sh Executable file
View File

@@ -0,0 +1,17 @@
#!/bin/bash
# Build RancherOS artifacts via dapper.
#
# Default ("production") builds compress the initrd with lzma, which is
# slow; pass --dev to skip that and get a faster developer build.
# The produced artifacts are listed from dist/artifacts when done.
set -e

if [ "$1" = "--dev" ]; then
    dapper make DEV_BUILD=1 all
else
    echo
    echo 'Running "production" build. Will use lzma to compress initrd, which is somewhat slow...'
    echo "Ctrl+C if you don't want this."
    echo
    echo 'For "developer" builds, run ./build.sh --dev'
    echo
    dapper make all
fi

ls -lh dist/artifacts

View File

@@ -1,4 +1,4 @@
package cloudinitexecute
package cloudinit
import (
"os"

383
cmd/cloudinit/cloudinit.go Normal file
View File

@@ -0,0 +1,383 @@
// Copyright 2015 CoreOS, Inc.
// Copyright 2015 Rancher Labs, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cloudinit
import (
"errors"
"flag"
"io/ioutil"
"os"
"strings"
"sync"
"syscall"
"time"
yaml "github.com/cloudfoundry-incubator/candiedyaml"
log "github.com/Sirupsen/logrus"
"github.com/coreos/coreos-cloudinit/config"
"github.com/coreos/coreos-cloudinit/datasource"
"github.com/coreos/coreos-cloudinit/datasource/configdrive"
"github.com/coreos/coreos-cloudinit/datasource/file"
"github.com/coreos/coreos-cloudinit/datasource/metadata/digitalocean"
"github.com/coreos/coreos-cloudinit/datasource/metadata/ec2"
"github.com/coreos/coreos-cloudinit/datasource/metadata/packet"
"github.com/coreos/coreos-cloudinit/datasource/proc_cmdline"
"github.com/coreos/coreos-cloudinit/datasource/url"
"github.com/coreos/coreos-cloudinit/pkg"
"github.com/coreos/coreos-cloudinit/system"
"github.com/rancher/netconf"
rancherConfig "github.com/rancher/os/config"
)
const (
	// Initial retry interval when polling a datasource for availability;
	// grows with exponential backoff up to datasourceMaxInterval.
	datasourceInterval    = 100 * time.Millisecond
	datasourceMaxInterval = 30 * time.Second
	// Overall deadline for any datasource to become available.
	datasourceTimeout = 5 * time.Minute
	// Marker name used when installing SSH keys from cloud-config.
	sshKeyName = "rancheros-cloud-config"
)

// Command-line flags, registered in init() and parsed in Main().
var (
	save    bool           // fetch user-data and save cloud-config, then exit
	execute bool           // apply the previously saved cloud-config
	network bool           // allow network-based datasources (ec2, url, gce, ...)
	flags   *flag.FlagSet
)
// init registers the cloud-init command-line flags on a dedicated FlagSet
// (ContinueOnError so a bad flag does not abort the whole binary).
func init() {
	flags = flag.NewFlagSet(os.Args[0], flag.ContinueOnError)
	flags.BoolVar(&network, "network", true, "use network based datasources")
	flags.BoolVar(&save, "save", false, "save cloud config and exit")
	flags.BoolVar(&execute, "execute", false, "execute saved cloud config")
}
// saveFiles persists the fetched cloud-config, optional user script, and
// datasource metadata to their well-known files under CloudConfigDir.
// Previously saved copies are removed first so stale data cannot survive.
// Returns the first write/marshal error encountered.
func saveFiles(cloudConfigBytes, scriptBytes []byte, metadata datasource.Metadata) error {
	// 0700 (not 0600): the directory needs the execute bit to be traversable.
	os.MkdirAll(rancherConfig.CloudConfigDir, os.ModeDir|0700)

	os.Remove(rancherConfig.CloudConfigScriptFile)
	os.Remove(rancherConfig.CloudConfigBootFile)
	os.Remove(rancherConfig.MetaDataFile)

	if len(scriptBytes) > 0 {
		log.Infof("Writing to %s", rancherConfig.CloudConfigScriptFile)
		// 0500 octal (owner r-x). The original passed decimal 500 (= 0764),
		// which unintentionally granted group write on the script.
		if err := ioutil.WriteFile(rancherConfig.CloudConfigScriptFile, scriptBytes, 0500); err != nil {
			log.Errorf("Error while writing file %s: %v", rancherConfig.CloudConfigScriptFile, err)
			return err
		}
	}

	// 0400 octal (owner read-only); decimal 400 would have been 0620.
	if err := ioutil.WriteFile(rancherConfig.CloudConfigBootFile, cloudConfigBytes, 0400); err != nil {
		return err
	}
	log.Infof("Written to %s:\n%s", rancherConfig.CloudConfigBootFile, string(cloudConfigBytes))

	metaDataBytes, err := yaml.Marshal(metadata)
	if err != nil {
		return err
	}

	if err = ioutil.WriteFile(rancherConfig.MetaDataFile, metaDataBytes, 0400); err != nil {
		return err
	}
	log.Infof("Written to %s:\n%s", rancherConfig.MetaDataFile, string(metaDataBytes))

	return nil
}
// currentDatasource loads the rancher config and selects the first
// available datasource from those it declares. Returns (nil, nil) when no
// datasources are configured; an error only if config loading fails.
func currentDatasource() (datasource.Datasource, error) {
	cfg, err := rancherConfig.LoadConfig()
	if err != nil {
		log.WithFields(log.Fields{"err": err}).Error("Failed to read rancher config")
		return nil, err
	}

	dss := getDatasources(cfg)
	if len(dss) == 0 {
		return nil, nil
	}

	// selectDatasource may itself return nil if none becomes available
	// before the timeout; callers must handle a nil datasource.
	ds := selectDatasource(dss)
	return ds, nil
}
// saveCloudConfig fetches user-data from the selected datasource,
// classifies it (script / compose / cloud-config), converts or validates it
// as needed, and persists the result via saveFiles. Unrecognized or
// unparsable user-data is replaced with empty bytes rather than saved.
func saveCloudConfig() error {
	userDataBytes, metadata, err := fetchUserData()
	if err != nil {
		return err
	}

	userData := string(userDataBytes)
	scriptBytes := []byte{}

	if config.IsScript(userData) {
		// Shell script: saved verbatim as the script file, no cloud-config.
		scriptBytes = userDataBytes
		userDataBytes = []byte{}
	} else if isCompose(userData) {
		// "#compose" payload: wrap it into cloud-config syntax first.
		if userDataBytes, err = composeToCloudConfig(userDataBytes); err != nil {
			log.Errorf("Failed to convert compose to cloud-config syntax: %v", err)
			return err
		}
	} else if config.IsCloudConfig(userData) {
		// Validate; on parse failure the payload is dropped (not saved).
		if _, err := rancherConfig.ReadConfig(userDataBytes, false); err != nil {
			log.WithFields(log.Fields{"cloud-config": userData, "err": err}).Warn("Failed to parse cloud-config, not saving.")
			userDataBytes = []byte{}
		}
	} else {
		log.Errorf("Unrecognized user-data\n%s", userData)
		userDataBytes = []byte{}
	}

	// Final validation of whatever survived the branches above (empty
	// bytes are expected to parse cleanly).
	if _, err := rancherConfig.ReadConfig(userDataBytes, false); err != nil {
		log.WithFields(log.Fields{"cloud-config": userData, "err": err}).Warn("Failed to parse cloud-config")
		return errors.New("Failed to parse cloud-config")
	}

	return saveFiles(userDataBytes, scriptBytes, metadata)
}
// fetchUserData selects the current datasource and retrieves both the raw
// user-data bytes and the datasource metadata from it. Returns a zero-value
// Metadata alongside any error. Note: when no datasource is configured
// (ds == nil, err == nil) this still returns a nil error with nil bytes.
func fetchUserData() ([]byte, datasource.Metadata, error) {
	var metadata datasource.Metadata
	ds, err := currentDatasource()
	if err != nil || ds == nil {
		log.Errorf("Failed to select datasource: %v", err)
		return nil, metadata, err
	}

	log.Infof("Fetching user-data from datasource %v", ds.Type())
	userDataBytes, err := ds.FetchUserdata()
	if err != nil {
		log.Errorf("Failed fetching user-data from datasource: %v", err)
		return nil, metadata, err
	}

	log.Infof("Fetching meta-data from datasource of type %v", ds.Type())
	metadata, err = ds.FetchMetadata()
	if err != nil {
		log.Errorf("Failed fetching meta-data from datasource: %v", err)
		return nil, metadata, err
	}

	return userDataBytes, metadata, nil
}
// SetHostname applies cc.Hostname via sethostname(2) when set; otherwise it
// re-applies the kernel's current hostname. If neither yields a non-empty
// name, nothing is set. Returns the name that was applied (or "").
// Requires privilege (CAP_SYS_ADMIN) to succeed.
func SetHostname(cc *rancherConfig.CloudConfig) (string, error) {
	// Current hostname is the fallback; its lookup error is ignored.
	name, _ := os.Hostname()
	if cc.Hostname != "" {
		name = cc.Hostname
	}
	if name != "" {
		// set hostname
		if err := syscall.Sethostname([]byte(name)); err != nil {
			log.WithFields(log.Fields{"err": err, "hostname": name}).Error("Error setting hostname")
			return "", err
		}
	}
	return name, nil
}
// executeCloudConfig applies the previously saved cloud-config: sets the
// hostname, installs SSH authorized keys for the rancher and docker users,
// and writes any configured files to the root filesystem. Individual
// write_files failures are logged and skipped, not fatal.
func executeCloudConfig() error {
	cc, err := rancherConfig.LoadConfig()
	if err != nil {
		return err
	}

	if _, err := SetHostname(cc); err != nil {
		return err
	}

	if len(cc.SSHAuthorizedKeys) > 0 {
		authorizeSSHKeys("rancher", cc.SSHAuthorizedKeys, sshKeyName)
		authorizeSSHKeys("docker", cc.SSHAuthorizedKeys, sshKeyName)
	}

	for _, file := range cc.WriteFiles {
		f := system.File{File: file}
		fullPath, err := system.WriteFile(&f, "/")
		if err != nil {
			log.WithFields(log.Fields{"err": err, "path": fullPath}).Error("Error writing file")
			continue
		}
		log.Printf("Wrote file %s to filesystem", fullPath)
	}

	return nil
}
// Main is the cloud-init entry point. Depending on the -save / -execute
// flags it fetches-and-saves cloud-config, applies it, or both (in that
// order). Errors from either phase are logged but do not abort the other.
func Main() {
	flags.Parse(os.Args[1:])

	log.Infof("Running cloud-init: save=%v, execute=%v", save, execute)

	if save {
		err := saveCloudConfig()
		if err != nil {
			log.WithFields(log.Fields{"err": err}).Error("Failed to save cloud-config")
		}
	}

	if execute {
		err := executeCloudConfig()
		if err != nil {
			log.WithFields(log.Fields{"err": err}).Error("Failed to execute cloud-config")
		}
	}
}
// getDatasources creates a slice of possible Datasources for cloudinit based
// on the different source command-line flags.
//
// Each configured entry has the form "type" or "type:argument". Datasources
// that require connectivity (ec2, url, cmdline, digitalocean, gce) are only
// added when the -network flag is true; some types additionally trigger
// network setup side effects when network is false (digitalocean link-local,
// packet network config).
func getDatasources(cfg *rancherConfig.CloudConfig) []datasource.Datasource {
	dss := make([]datasource.Datasource, 0, 5)

	for _, ds := range cfg.Rancher.CloudInit.Datasources {
		// Split into at most [type, argument].
		parts := strings.SplitN(ds, ":", 2)

		switch parts[0] {
		case "ec2":
			if network {
				if len(parts) == 1 {
					// No address given: use the well-known EC2 metadata address.
					dss = append(dss, ec2.NewDatasource(ec2.DefaultAddress))
				} else {
					dss = append(dss, ec2.NewDatasource(parts[1]))
				}
			}
		case "file":
			if len(parts) == 2 {
				dss = append(dss, file.NewDatasource(parts[1]))
			}
		case "url":
			if network {
				if len(parts) == 2 {
					dss = append(dss, url.NewDatasource(parts[1]))
				}
			}
		case "cmdline":
			if network {
				if len(parts) == 1 {
					dss = append(dss, proc_cmdline.NewDatasource())
				}
			}
		case "configdrive":
			if len(parts) == 2 {
				dss = append(dss, configdrive.NewDatasource(parts[1]))
			}
		case "digitalocean":
			if network {
				if len(parts) == 1 {
					dss = append(dss, digitalocean.NewDatasource(digitalocean.DefaultAddress))
				} else {
					dss = append(dss, digitalocean.NewDatasource(parts[1]))
				}
			} else {
				// Without networking, bring up IPv4 link-local on eth0 so the
				// DO metadata service is reachable on a later pass.
				enableDoLinkLocal()
			}
		case "gce":
			if network {
				// GCE metadata is materialized to a local file first, then
				// consumed through the generic file datasource.
				gceCloudConfigFile, err := GetAndCreateGceDataSourceFilename()
				if err != nil {
					log.Errorf("Could not retrieve GCE CloudConfig %s", err)
					continue
				}
				dss = append(dss, file.NewDatasource(gceCloudConfigFile))
			}
		case "packet":
			if !network {
				enablePacketNetwork(&cfg.Rancher)
			}
			// Packet is added regardless of the network flag.
			dss = append(dss, packet.NewDatasource("https://metadata.packet.net/"))
		}
	}

	return dss
}
// enableDoLinkLocal configures IPv4 link-local addressing on eth0 (used so
// the DigitalOcean metadata service is reachable before real networking is
// up). Failure is logged, not returned — this is best-effort.
func enableDoLinkLocal() {
	err := netconf.ApplyNetworkConfigs(&netconf.NetworkConfig{
		Interfaces: map[string]netconf.InterfaceConfig{
			"eth0": {
				IPV4LL: true,
			},
		},
	})
	if err != nil {
		log.Errorf("Failed to apply link local on eth0: %v", err)
	}
}
// selectDatasource attempts to choose a valid Datasource to use based on its
// current availability. The first Datasource to report to be available is
// returned. Datasources will be retried if possible if they are not
// immediately available. If all Datasources are permanently unavailable or
// datasourceTimeout is reached before one becomes available, nil is returned.
func selectDatasource(sources []datasource.Datasource) datasource.Datasource {
	ds := make(chan datasource.Datasource)
	stop := make(chan struct{})
	var wg sync.WaitGroup

	// One polling goroutine per candidate; each backs off exponentially
	// from datasourceInterval up to datasourceMaxInterval.
	for _, s := range sources {
		wg.Add(1)
		go func(s datasource.Datasource) {
			defer wg.Done()

			duration := datasourceInterval
			for {
				log.Infof("Checking availability of %q\n", s.Type())
				if s.IsAvailable() {
					// NOTE: the winner blocks on this unbuffered send; once
					// the selector picks one, `stop` releases the rest.
					ds <- s
					return
				} else if !s.AvailabilityChanges() {
					// Permanently unavailable — give up on this source.
					return
				}
				select {
				case <-stop:
					return
				case <-time.After(duration):
					duration = pkg.ExpBackoff(duration, datasourceMaxInterval)
				}
			}
		}(s)
	}

	// done fires when every poller has given up (all permanently unavailable).
	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	var s datasource.Datasource
	select {
	case s = <-ds: // first available source wins
	case <-done: // nothing will ever become available
	case <-time.After(datasourceTimeout): // global deadline
	}

	close(stop)

	return s
}
// isCompose reports whether the user-data payload is a docker-compose
// document, which RancherOS marks with a leading "#compose" line.
func isCompose(content string) bool {
	return strings.Index(content, "#compose\n") == 0
}
// composeToCloudConfig wraps a docker-compose document in the cloud-config
// structure RancherOS expects, nesting the parsed services under
// rancher.services.
func composeToCloudConfig(bytes []byte) ([]byte, error) {
	services := map[interface{}]interface{}{}
	if err := yaml.Unmarshal(bytes, &services); err != nil {
		return nil, err
	}
	wrapped := map[interface{}]interface{}{
		"rancher": map[interface{}]interface{}{
			"services": services,
		},
	}
	return yaml.Marshal(wrapped)
}

140
cmd/cloudinit/gce.go Normal file
View File

@@ -0,0 +1,140 @@
package cloudinit
import (
"io/ioutil"
"strings"
log "github.com/Sirupsen/logrus"
yaml "github.com/cloudfoundry-incubator/candiedyaml"
"google.golang.org/cloud/compute/metadata"
)
// GceCloudConfig aggregates the user-data and SSH keys fetched from the GCE
// metadata service before they are merged and written to disk.
type GceCloudConfig struct {
	FileName string // destination path for the merged cloud-config
	UserData string // raw instance "user-data" attribute (may be empty)
	NonUserDataSSHKeys []string // keys from the project/instance "sshKeys" attributes
}
const (
	// gceCloudConfigFile is where the merged GCE cloud-config is persisted.
	gceCloudConfigFile = "/var/lib/rancher/conf/gce_cloudinit_config.yml"
)
// NewGceCloudConfig queries the GCE metadata service for the instance
// user-data plus the project- and instance-level SSH keys and bundles them
// into a GceCloudConfig. Lookup failures are logged and leave the
// corresponding field empty.
func NewGceCloudConfig() *GceCloudConfig {
	userData, err := metadata.InstanceAttributeValue("user-data")
	if err != nil {
		log.Errorf("Could not retrieve user-data: %s", err)
	}
	projectSSHKeys, err := metadata.ProjectAttributeValue("sshKeys")
	if err != nil {
		log.Errorf("Could not retrieve project SSH Keys: %s", err)
	}
	instanceSSHKeys, err := metadata.InstanceAttributeValue("sshKeys")
	if err != nil {
		log.Errorf("Could not retrieve instance SSH Keys: %s", err)
	}
	// Project keys first, then instance keys, one per line.
	combinedKeys := projectSSHKeys + "\n" + instanceSSHKeys
	return &GceCloudConfig{
		FileName:           gceCloudConfigFile,
		UserData:           userData,
		NonUserDataSSHKeys: gceSshKeyFormatter(combinedKeys),
	}
}
// GetAndCreateGceDataSourceFilename builds the merged GCE cloud-config,
// persists it to its configured path, and returns that path.
func GetAndCreateGceDataSourceFilename() (string, error) {
	cc := NewGceCloudConfig()
	if err := cc.saveToFile(cc.FileName); err != nil {
		log.Errorf("Error: %s", err)
		return "", err
	}
	return cc.FileName, nil
}
// saveToFile merges the user-data with the metadata SSH keys and writes the
// result to filename. Any merge or write error is returned to the caller.
func (cc *GceCloudConfig) saveToFile(filename string) error {
	// Get merged user-data plus SSH keys.
	data, err := cc.getMergedUserData()
	if err != nil {
		log.Errorf("Could not process userdata: %s", err)
		return err
	}
	// Bug fix: the error from writeFile was previously discarded, so a
	// failed write was reported as success.
	return writeFile(filename, data)
}
// getMergedUserData unmarshals the instance user-data (when present),
// appends the project/instance SSH keys to its ssh_authorized_keys list, and
// re-serializes the result as a "#cloud-config" document.
func (cc *GceCloudConfig) getMergedUserData() ([]byte, error) {
	userdata := make(map[string]interface{})
	if cc.UserData != "" {
		log.Infof("Found UserData Config")
		if err := yaml.Unmarshal([]byte(cc.UserData), &userdata); err != nil {
			log.Errorf("Could not unmarshal data: %s", err)
			return nil, err
		}
	}
	var authKeys []string
	if raw, exists := userdata["ssh_authorized_keys"]; exists {
		// Bug fix: the type assertions were unchecked, so user-data with a
		// non-list ssh_authorized_keys (or non-string entries) panicked.
		if udSshKeys, ok := raw.([]interface{}); ok {
			log.Infof("userdata %s", udSshKeys)
			for _, value := range udSshKeys {
				if key, ok := value.(string); ok {
					authKeys = append(authKeys, key)
				} else {
					log.Errorf("Ignoring non-string ssh_authorized_keys entry: %v", value)
				}
			}
		} else {
			log.Errorf("Ignoring ssh_authorized_keys: not a list")
		}
	}
	// Metadata-provided keys are appended after any user-data keys.
	authKeys = append(authKeys, cc.NonUserDataSSHKeys...)
	userdata["ssh_authorized_keys"] = authKeys
	yamlUserData, err := yaml.Marshal(&userdata)
	if err != nil {
		log.Errorf("Could not Marshal userdata: %s", err)
		return nil, err
	}
	return append([]byte("#cloud-config\n"), yamlUserData...), nil
}
// writeFile writes data to filename, readable only by its owner.
func writeFile(filename string, data []byte) error {
	// Bug fix: the mode was the decimal literal 400 (octal 0620 — group
	// writable!); the intended permission is octal 0400 (owner read-only).
	if err := ioutil.WriteFile(filename, data, 0400); err != nil {
		log.Errorf("Could not write file %v", err)
		return err
	}
	return nil
}
// gceSshKeyFormatter converts GCE-format SSH key entries ("user:key ...")
// into plain authorized_keys lines by stripping everything up to and
// including the first ':'. Lines without a ':' and empty keys are skipped.
func gceSshKeyFormatter(rawKeys string) []string {
	var cloudFormatedKeys []string
	// Cleanups vs. the original: strings.Split never returns an empty slice,
	// so the outer len check was dead; the split-then-join on " " was a
	// byte-for-byte no-op and has been removed.
	for _, keyString := range strings.Split(rawKeys, "\n") {
		sIdx := strings.Index(keyString, ":")
		if sIdx == -1 {
			continue
		}
		key := strings.TrimSpace(keyString[sIdx+1:])
		if key != "" {
			cloudFormatedKeys = append(cloudFormatedKeys, key)
		}
	}
	return cloudFormatedKeys
}

95
cmd/cloudinit/packet.go Normal file
View File

@@ -0,0 +1,95 @@
package cloudinit
import (
"fmt"
"net/http"
"os"
"path"
"strings"
yaml "github.com/cloudfoundry-incubator/candiedyaml"
"github.com/Sirupsen/logrus"
"github.com/packethost/packngo/metadata"
"github.com/rancher/netconf"
rancherConfig "github.com/rancher/os/config"
)
// enablePacketNetwork builds a bonded network configuration from the Packet
// metadata service and persists it to the cloud-config network file.
func enablePacketNetwork(cfg *rancherConfig.RancherConfig) {
	// Apply the install-time config first: the metadata service is only
	// reachable once an interface with a configured address is up.
	bootStrapped := false
	for _, v := range cfg.Network.Interfaces {
		if v.Address != "" {
			if err := netconf.ApplyNetworkConfigs(&cfg.Network); err != nil {
				logrus.Errorf("Failed to bootstrap network: %v", err)
				return
			}
			bootStrapped = true
			break
		}
	}
	// No statically-addressed interface: nothing to bootstrap from.
	if !bootStrapped {
		return
	}
	c := metadata.NewClient(http.DefaultClient)
	m, err := c.Metadata.Get()
	if err != nil {
		logrus.Errorf("Failed to get Packet metadata: %v", err)
		return
	}
	// bond0 options; mode "4" with lacp_rate selects an 802.3ad-style bond
	// (see Linux bonding driver documentation).
	bondCfg := netconf.InterfaceConfig{
		Addresses: []string{},
		BondOpts: map[string]string{
			"lacp_rate": "1",
			"xmit_hash_policy": "layer3+4",
			"downdelay": "200",
			"updelay": "200",
			"miimon": "100",
			"mode": "4",
		},
	}
	netCfg := netconf.NetworkConfig{
		Interfaces: map[string]netconf.InterfaceConfig{},
	}
	// Enslave every metadata-reported interface (matched by MAC) to bond0.
	for _, iface := range m.Network.Interfaces {
		netCfg.Interfaces["mac="+iface.Mac] = netconf.InterfaceConfig{
			Bond: "bond0",
		}
	}
	for _, addr := range m.Network.Addresses {
		bondCfg.Addresses = append(bondCfg.Addresses, fmt.Sprintf("%s/%d", addr.Address, addr.Cidr))
		if addr.Gateway != "" {
			if addr.AddressFamily == 4 {
				// Only the public IPv4 gateway becomes the default route.
				if addr.Public {
					bondCfg.Gateway = addr.Gateway
				}
			} else {
				bondCfg.GatewayIpv6 = addr.Gateway
			}
		}
		// Route RFC1918 10/8 traffic via the private gateway.
		if addr.AddressFamily == 4 && strings.HasPrefix(addr.Gateway, "10.") {
			bondCfg.PostUp = append(bondCfg.PostUp, "ip route add 10.0.0.0/8 via "+addr.Gateway)
		}
	}
	netCfg.Interfaces["bond0"] = bondCfg
	bytes, _ := yaml.Marshal(netCfg)
	logrus.Debugf("Generated network config: %s", string(bytes))
	cc := rancherConfig.CloudConfig{
		Rancher: rancherConfig.RancherConfig{
			Network: netCfg,
		},
	}
	// Persist the generated config; failures are logged but non-fatal.
	if err := os.MkdirAll(path.Dir(rancherConfig.CloudConfigNetworkFile), 0700); err != nil {
		logrus.Errorf("Failed to create directory for file %s: %v", rancherConfig.CloudConfigNetworkFile, err)
	}
	if err := rancherConfig.WriteToFile(cc, rancherConfig.CloudConfigNetworkFile); err != nil {
		logrus.Errorf("Failed to save config file %s: %v", rancherConfig.CloudConfigNetworkFile, err)
	}
}

View File

@@ -1,186 +0,0 @@
package cloudinitexecute
import (
"flag"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path"
"strings"
log "github.com/Sirupsen/logrus"
"github.com/coreos/coreos-cloudinit/system"
rancherConfig "github.com/rancher/os/config"
"github.com/rancher/os/docker"
"github.com/rancher/os/util"
"golang.org/x/net/context"
)
const (
	// resizeStamp marks that the one-time filesystem resize already ran.
	resizeStamp = "/var/lib/rancher/resizefs.done"
	// sshKeyName labels authorized keys installed from cloud-config.
	sshKeyName = "rancheros-cloud-config"
)
var (
	console bool // apply the console-stage configuration
	preConsole bool // apply the pre-console-stage configuration
	flags *flag.FlagSet
)
// init registers the stage-selection flags parsed by Main.
func init() {
	flags = flag.NewFlagSet(os.Args[0], flag.ContinueOnError)
	flags.BoolVar(&console, "console", false, "apply console configuration")
	flags.BoolVar(&preConsole, "pre-console", false, "apply pre-console configuration")
}
// Main is the cloud-init-execute entry point. When neither stage flag is
// given, both the console and pre-console stages are applied.
func Main() {
	flags.Parse(os.Args[1:])
	log.Infof("Running cloud-init-execute: pre-console=%v, console=%v", preConsole, console)
	cfg := rancherConfig.LoadConfig()
	// No stage selected means run every stage.
	if !(console || preConsole) {
		console, preConsole = true, true
	}
	if console {
		ApplyConsole(cfg)
	}
	if preConsole {
		applyPreConsole(cfg)
	}
}
// ApplyConsole applies the console-stage cloud-config: authorized SSH keys
// for the rancher and docker users, write_files entries, mounts (including
// swap), and runcmd commands. Individual failures are logged and skipped.
func ApplyConsole(cfg *rancherConfig.CloudConfig) {
	if len(cfg.SSHAuthorizedKeys) > 0 {
		authorizeSSHKeys("rancher", cfg.SSHAuthorizedKeys, sshKeyName)
		authorizeSSHKeys("docker", cfg.SSHAuthorizedKeys, sshKeyName)
	}
	WriteFiles(cfg, "console")
	// Each mount entry is [device, mountpoint, fstype, options].
	for _, mount := range cfg.Mounts {
		if len(mount) != 4 {
			// Bug fix: previously this only logged (indexing mount[1], which
			// itself panics for len < 2) and then fell through to use
			// mount[0..3] out of range. Log the whole entry and skip it.
			log.Errorf("Unable to mount %v: must specify exactly four arguments", mount)
			continue
		}
		device := util.ResolveDevice(mount[0])
		if mount[2] == "swap" {
			cmd := exec.Command("swapon", device)
			cmd.Stdout = os.Stdout
			cmd.Stderr = os.Stderr
			err := cmd.Run()
			if err != nil {
				log.Errorf("Unable to swapon %s: %v", device, err)
			}
			continue
		}
		cmdArgs := []string{device, mount[1]}
		if mount[2] != "" {
			cmdArgs = append(cmdArgs, "-t", mount[2])
		}
		if mount[3] != "" {
			cmdArgs = append(cmdArgs, "-o", mount[3])
		}
		cmd := exec.Command("mount", cmdArgs...)
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		if err := cmd.Run(); err != nil {
			log.Errorf("Failed to mount %s: %v", mount[1], err)
		}
	}
	// Run each runcmd entry as argv (first element is the program).
	for _, runcmd := range cfg.Runcmd {
		if len(runcmd) == 0 {
			continue
		}
		cmd := exec.Command(runcmd[0], runcmd[1:]...)
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		if err := cmd.Run(); err != nil {
			log.Errorf("Failed to run %s: %v", runcmd, err)
		}
	}
}
// WriteFiles writes every write_files entry whose target container matches
// the given container name; entries with no container default to "console".
func WriteFiles(cfg *rancherConfig.CloudConfig, container string) {
	for _, file := range cfg.WriteFiles {
		target := file.Container
		if target == "" {
			target = "console"
		}
		if target != container {
			continue
		}
		f := system.File{File: file.File}
		// Paths are written relative to the root filesystem.
		fullPath, err := system.WriteFile(&f, "/")
		if err != nil {
			log.WithFields(log.Fields{"err": err, "path": fullPath}).Error("Error writing file")
			continue
		}
		log.Printf("Wrote file %s to filesystem", fullPath)
	}
}
// applyPreConsole runs the pre-console stage: a one-time device/filesystem
// resize (stamped so reboots skip it), sysctl settings, and restarts of the
// configured system services.
func applyPreConsole(cfg *rancherConfig.CloudConfig) {
	if _, err := os.Stat(resizeStamp); os.IsNotExist(err) && cfg.Rancher.ResizeDevice != "" {
		if err := resizeDevice(cfg); err == nil {
			// Stamp success so the resize only ever runs once.
			os.Create(resizeStamp)
		} else {
			log.Errorf("Failed to resize %s: %s", cfg.Rancher.ResizeDevice, err)
		}
	}
	// Apply sysctls by writing /proc/sys/<dotted.key.as.path>.
	for k, v := range cfg.Rancher.Sysctl {
		elems := []string{"/proc", "sys"}
		elems = append(elems, strings.Split(k, ".")...)
		path := path.Join(elems...)
		if err := ioutil.WriteFile(path, []byte(v), 0644); err != nil {
			log.Errorf("Failed to set sysctl key %s: %s", k, err)
		}
	}
	client, err := docker.NewSystemClient()
	if err != nil {
		log.Error(err)
		// Bug fix: previously fell through and called ContainerRestart on
		// the nil client.
		return
	}
	for _, restart := range cfg.Rancher.RestartServices {
		if err = client.ContainerRestart(context.Background(), restart, 10); err != nil {
			log.Error(err)
		}
	}
}
// resizeDevice grows partition 1 of the configured resize device, re-reads
// the partition table, and expands the ext filesystem to fill it.
func resizeDevice(cfg *rancherConfig.CloudConfig) error {
	cmd := exec.Command("growpart", cfg.Rancher.ResizeDevice, "1")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	// NOTE(review): growpart's error is deliberately ignored — presumably
	// because it fails when the partition is already at full size; confirm.
	cmd.Run()
	cmd = exec.Command("partprobe", cfg.Rancher.ResizeDevice)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	err := cmd.Run()
	if err != nil {
		return err
	}
	// Assumes partition 1's node is <device>1 (e.g. /dev/sda1).
	cmd = exec.Command("resize2fs", fmt.Sprintf("%s1", cfg.Rancher.ResizeDevice))
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	err = cmd.Run()
	if err != nil {
		return err
	}
	return nil
}

View File

@@ -1,313 +0,0 @@
// Copyright 2015 CoreOS, Inc.
// Copyright 2015 Rancher Labs, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cloudinitsave
import (
"errors"
"flag"
"os"
"strings"
"sync"
"time"
yaml "github.com/cloudfoundry-incubator/candiedyaml"
log "github.com/Sirupsen/logrus"
"github.com/coreos/coreos-cloudinit/config"
"github.com/coreos/coreos-cloudinit/datasource"
"github.com/coreos/coreos-cloudinit/datasource/configdrive"
"github.com/coreos/coreos-cloudinit/datasource/file"
"github.com/coreos/coreos-cloudinit/datasource/metadata/digitalocean"
"github.com/coreos/coreos-cloudinit/datasource/metadata/ec2"
"github.com/coreos/coreos-cloudinit/datasource/metadata/packet"
"github.com/coreos/coreos-cloudinit/datasource/proc_cmdline"
"github.com/coreos/coreos-cloudinit/datasource/url"
"github.com/coreos/coreos-cloudinit/pkg"
"github.com/rancher/netconf"
"github.com/rancher/os/cmd/cloudinitsave/gce"
rancherConfig "github.com/rancher/os/config"
"github.com/rancher/os/util"
)
const (
	// Polling cadence and bounds for datasource availability checks.
	datasourceInterval = 100 * time.Millisecond
	datasourceMaxInterval = 30 * time.Second
	// datasourceTimeout caps the total wait for any source to become available.
	datasourceTimeout = 5 * time.Minute
)
var (
	network bool // whether network-based datasources may be used
	flags *flag.FlagSet
)
// init registers the -network flag parsed by Main.
func init() {
	flags = flag.NewFlagSet(os.Args[0], flag.ContinueOnError)
	flags.BoolVar(&network, "network", true, "use network based datasources")
}
// Main is the cloud-init-save entry point: it selects a datasource and
// persists the fetched cloud-config, script, and metadata to disk.
func Main() {
	flags.Parse(os.Args[1:])
	log.Infof("Running cloud-init-save: network=%v", network)
	err := saveCloudConfig()
	if err != nil {
		log.Errorf("Failed to save cloud-config: %v", err)
	}
}
// saveFiles persists the fetched user-data (cloud-config and/or script) and
// the datasource metadata under the RancherOS cloud-config directory.
func saveFiles(cloudConfigBytes, scriptBytes []byte, metadata datasource.Metadata) error {
	// Bug fix: the directory mode was 0600 (no execute bit, so the directory
	// could not be traversed) and the file modes below were the decimal
	// literals 500/400 rather than the intended octal 0500/0400.
	os.MkdirAll(rancherConfig.CloudConfigDir, os.ModeDir|0700)
	if len(scriptBytes) > 0 {
		log.Infof("Writing to %s", rancherConfig.CloudConfigScriptFile)
		if err := util.WriteFileAtomic(rancherConfig.CloudConfigScriptFile, scriptBytes, 0500); err != nil {
			log.Errorf("Error while writing file %s: %v", rancherConfig.CloudConfigScriptFile, err)
			return err
		}
	}
	if len(cloudConfigBytes) > 0 {
		if err := util.WriteFileAtomic(rancherConfig.CloudConfigBootFile, cloudConfigBytes, 0400); err != nil {
			return err
		}
		log.Infof("Written to %s:\n%s", rancherConfig.CloudConfigBootFile, string(cloudConfigBytes))
	}
	metaDataBytes, err := yaml.Marshal(metadata)
	if err != nil {
		return err
	}
	if err = util.WriteFileAtomic(rancherConfig.MetaDataFile, metaDataBytes, 0400); err != nil {
		return err
	}
	log.Infof("Written to %s:\n%s", rancherConfig.MetaDataFile, string(metaDataBytes))
	return nil
}
// currentDatasource builds the configured datasource candidates and returns
// the first one that reports itself available (nil when none are configured
// or none become available).
func currentDatasource() (datasource.Datasource, error) {
	cfg := rancherConfig.LoadConfig()
	candidates := getDatasources(cfg)
	if len(candidates) == 0 {
		return nil, nil
	}
	return selectDatasource(candidates), nil
}
// saveCloudConfig fetches user-data from the selected datasource, classifies
// it (shebang script, compose document, or cloud-config), and persists it
// together with the fetched metadata. Unparseable or unrecognized user-data
// is dropped (saved as empty) rather than persisted.
func saveCloudConfig() error {
	userDataBytes, metadata, err := fetchUserData()
	if err != nil {
		return err
	}
	userData := string(userDataBytes)
	scriptBytes := []byte{}
	if config.IsScript(userData) {
		// Scripts are stored verbatim and not parsed as config.
		scriptBytes = userDataBytes
		userDataBytes = []byte{}
	} else if isCompose(userData) {
		// "#compose" payloads are rewrapped into cloud-config form.
		if userDataBytes, err = composeToCloudConfig(userDataBytes); err != nil {
			log.Errorf("Failed to convert compose to cloud-config syntax: %v", err)
			return err
		}
	} else if config.IsCloudConfig(userData) {
		// Validate; on parse failure the payload is dropped, not saved.
		if _, err := rancherConfig.ReadConfig(userDataBytes, false); err != nil {
			log.WithFields(log.Fields{"cloud-config": userData, "err": err}).Warn("Failed to parse cloud-config, not saving.")
			userDataBytes = []byte{}
		}
	} else {
		log.Errorf("Unrecognized user-data\n%s", userData)
		userDataBytes = []byte{}
	}
	// Final sanity parse before writing.
	// NOTE(review): assumes ReadConfig accepts an emptied byte slice — confirm.
	if _, err := rancherConfig.ReadConfig(userDataBytes, false); err != nil {
		log.WithFields(log.Fields{"cloud-config": userData, "err": err}).Warn("Failed to parse cloud-config")
		return errors.New("Failed to parse cloud-config")
	}
	return saveFiles(userDataBytes, scriptBytes, metadata)
}
// fetchUserData selects a datasource and retrieves both its user-data and
// its metadata, logging and returning any failure along the way.
func fetchUserData() ([]byte, datasource.Metadata, error) {
	var metadata datasource.Metadata
	ds, err := currentDatasource()
	if err != nil || ds == nil {
		log.Errorf("Failed to select datasource: %v", err)
		return nil, metadata, err
	}
	log.Infof("Fetching user-data from datasource %v", ds.Type())
	userDataBytes, err := ds.FetchUserdata()
	if err != nil {
		log.Errorf("Failed fetching user-data from datasource: %v", err)
		return nil, metadata, err
	}
	log.Infof("Fetching meta-data from datasource of type %v", ds.Type())
	if metadata, err = ds.FetchMetadata(); err != nil {
		log.Errorf("Failed fetching meta-data from datasource: %v", err)
		return nil, metadata, err
	}
	return userDataBytes, metadata, nil
}
// getDatasources creates a slice of possible Datasources for cloudinit based
// on the different source command-line flags.
// Each configured entry has the form "name" or "name:address". Sources that
// need networking are only added when the -network flag is set; the
// digitalocean and packet cases additionally perform pre-network setup.
func getDatasources(cfg *rancherConfig.CloudConfig) []datasource.Datasource {
	dss := make([]datasource.Datasource, 0, 5)
	for _, ds := range cfg.Rancher.CloudInit.Datasources {
		parts := strings.SplitN(ds, ":", 2)
		switch parts[0] {
		case "ec2":
			if network {
				if len(parts) == 1 {
					dss = append(dss, ec2.NewDatasource(ec2.DefaultAddress))
				} else {
					dss = append(dss, ec2.NewDatasource(parts[1]))
				}
			}
		case "file":
			// Requires an explicit path argument.
			if len(parts) == 2 {
				dss = append(dss, file.NewDatasource(parts[1]))
			}
		case "url":
			if network {
				if len(parts) == 2 {
					dss = append(dss, url.NewDatasource(parts[1]))
				}
			}
		case "cmdline":
			if network {
				if len(parts) == 1 {
					dss = append(dss, proc_cmdline.NewDatasource())
				}
			}
		case "configdrive":
			if len(parts) == 2 {
				dss = append(dss, configdrive.NewDatasource(parts[1]))
			}
		case "digitalocean":
			if network {
				if len(parts) == 1 {
					dss = append(dss, digitalocean.NewDatasource(digitalocean.DefaultAddress))
				} else {
					dss = append(dss, digitalocean.NewDatasource(parts[1]))
				}
			} else {
				// Pre-network pass: bring up link-local so the DO metadata
				// service is reachable on the later network pass.
				enableDoLinkLocal()
			}
		case "gce":
			if network {
				dss = append(dss, gce.NewDatasource("http://metadata.google.internal/"))
			}
		case "packet":
			// Pre-network pass configures the Packet bonded network first.
			if !network {
				enablePacketNetwork(&cfg.Rancher)
			}
			dss = append(dss, packet.NewDatasource("https://metadata.packet.net/"))
		}
	}
	return dss
}
// enableDoLinkLocal brings up eth0 with an IPv4 link-local address; used by
// the pre-network DigitalOcean datasource pass so the metadata service can
// be reached before full networking is configured.
func enableDoLinkLocal() {
	err := netconf.ApplyNetworkConfigs(&netconf.NetworkConfig{
		Interfaces: map[string]netconf.InterfaceConfig{
			"eth0": {
				IPV4LL: true,
			},
		},
	})
	if err != nil {
		log.Errorf("Failed to apply link local on eth0: %v", err)
	}
}
// selectDatasource attempts to choose a valid Datasource to use based on its
// current availability. The first Datasource to report to be available is
// returned. Datasources will be retried if possible if they are not
// immediately available. If all Datasources are permanently unavailable or
// datasourceTimeout is reached before one becomes available, nil is returned.
func selectDatasource(sources []datasource.Datasource) datasource.Datasource {
	// ds receives the first source that reports available.
	ds := make(chan datasource.Datasource)
	// stop is closed to tell still-polling goroutines to give up.
	stop := make(chan struct{})
	var wg sync.WaitGroup
	for _, s := range sources {
		wg.Add(1)
		go func(s datasource.Datasource) {
			defer wg.Done()
			duration := datasourceInterval
			for {
				log.Infof("Checking availability of %q\n", s.Type())
				if s.IsAvailable() {
					// NOTE(review): a second available source blocks forever
					// here once a winner was taken (goroutine leak) — confirm
					// acceptable for this short-lived process.
					ds <- s
					return
				} else if !s.AvailabilityChanges() {
					// Permanently unavailable: stop retrying this source.
					return
				}
				select {
				case <-stop:
					return
				case <-time.After(duration):
					// Exponential backoff between availability probes.
					duration = pkg.ExpBackoff(duration, datasourceMaxInterval)
				}
			}
		}(s)
	}
	// done closes once every poller has exited.
	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()
	var s datasource.Datasource
	// Wait for a winner, total exhaustion, or the global timeout.
	select {
	case s = <-ds:
	case <-done:
	case <-time.After(datasourceTimeout):
	}
	close(stop)
	return s
}
// isCompose reports whether the payload is a docker-compose document,
// marked by a leading "#compose" line.
func isCompose(content string) bool {
	const marker = "#compose\n"
	return strings.HasPrefix(content, marker)
}
// composeToCloudConfig wraps a docker-compose document in the cloud-config
// structure RancherOS expects, nesting the parsed content under
// rancher.services.
func composeToCloudConfig(bytes []byte) ([]byte, error) {
	compose := make(map[interface{}]interface{})
	err := yaml.Unmarshal(bytes, &compose)
	if err != nil {
		return nil, err
	}
	return yaml.Marshal(map[interface{}]interface{}{
		"rancher": map[interface{}]interface{}{
			"services": compose,
		},
	})
}

View File

@@ -1,130 +0,0 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gce
import (
"fmt"
"net"
"net/http"
"strconv"
"strings"
"github.com/coreos/coreos-cloudinit/datasource"
"github.com/coreos/coreos-cloudinit/datasource/metadata"
)
const (
	apiVersion = "computeMetadata/v1/"
	// metadataPath is the API root; userdataPath addresses the instance
	// "user-data" attribute beneath it.
	metadataPath = apiVersion
	userdataPath = apiVersion + "instance/attributes/user-data"
)
// metadataService adapts the generic coreos-cloudinit metadata client to the
// GCE metadata server.
type metadataService struct {
	metadata.MetadataService
}
// NewDatasource returns a GCE metadata datasource rooted at root, sending
// the "Metadata-Flavor: Google" header on every request.
func NewDatasource(root string) *metadataService {
	return &metadataService{metadata.NewDatasource(root, apiVersion, userdataPath, metadataPath, http.Header{"Metadata-Flavor": {"Google"}})}
}
// FetchMetadata assembles GCE instance metadata: public and private IPv4
// addresses, hostname, and the union of project- and instance-level SSH
// keys, keyed by their position ("0", "1", ...).
func (ms metadataService) FetchMetadata() (datasource.Metadata, error) {
	public, err := ms.fetchIP("instance/network-interfaces/0/access-configs/0/external-ip")
	if err != nil {
		return datasource.Metadata{}, err
	}
	local, err := ms.fetchIP("instance/network-interfaces/0/ip")
	if err != nil {
		return datasource.Metadata{}, err
	}
	hostname, err := ms.fetchString("instance/hostname")
	if err != nil {
		return datasource.Metadata{}, err
	}
	projectSshKeys, err := ms.fetchString("project/attributes/sshKeys")
	if err != nil {
		return datasource.Metadata{}, err
	}
	instanceSshKeys, err := ms.fetchString("instance/attributes/sshKeys")
	if err != nil {
		return datasource.Metadata{}, err
	}
	// Entries are "user:key" lines; strip the user prefix, skip blanks.
	keyStrings := strings.Split(projectSshKeys+"\n"+instanceSshKeys, "\n")
	sshPublicKeys := map[string]string{}
	i := 0
	for _, keyString := range keyStrings {
		keySlice := strings.SplitN(keyString, ":", 2)
		if len(keySlice) == 2 {
			key := strings.TrimSpace(keySlice[1])
			if key != "" {
				// Cleanup: reuse the already-trimmed key instead of
				// recomputing strings.TrimSpace(keySlice[1]) a second time.
				sshPublicKeys[strconv.Itoa(i)] = key
				i++
			}
		}
	}
	return datasource.Metadata{
		PublicIPv4:    public,
		PrivateIPv4:   local,
		Hostname:      hostname,
		SSHPublicKeys: sshPublicKeys,
	}, nil
}
// Type identifies this datasource in logs and selection output.
func (ms metadataService) Type() string {
	return "gce-metadata-service"
}
// fetchString GETs the metadata value at key (relative to the metadata
// root) and returns it as a string.
func (ms metadataService) fetchString(key string) (string, error) {
	data, err := ms.FetchData(ms.MetadataUrl() + key)
	if err != nil {
		return "", err
	}
	return string(data), nil
}
// fetchIP retrieves the metadata value at key and parses it as an IP
// address. An empty value yields a nil IP with no error; an unparseable
// value is an error.
func (ms metadataService) fetchIP(key string) (net.IP, error) {
	str, err := ms.fetchString(key)
	if err != nil {
		return nil, err
	}
	if str == "" {
		return nil, nil
	}
	ip := net.ParseIP(str)
	if ip == nil {
		return nil, fmt.Errorf("couldn't parse %q as IP address", str)
	}
	return ip, nil
}
// FetchUserdata returns the instance "user-data" attribute, falling back to
// the "startup-script" attribute when user-data is empty.
func (ms metadataService) FetchUserdata() ([]byte, error) {
	data, err := ms.FetchData(ms.UserdataUrl())
	if err != nil {
		return nil, err
	}
	if len(data) == 0 {
		data, err = ms.FetchData(ms.MetadataUrl() + "instance/attributes/startup-script")
		if err != nil {
			return nil, err
		}
	}
	return data, nil
}

View File

@@ -1,103 +0,0 @@
package cloudinitsave
import (
"bytes"
"fmt"
"net/http"
"os"
"path"
"strings"
yaml "github.com/cloudfoundry-incubator/candiedyaml"
"github.com/Sirupsen/logrus"
"github.com/packethost/packngo/metadata"
"github.com/rancher/netconf"
rancherConfig "github.com/rancher/os/config"
)
// enablePacketNetwork builds a bonded network configuration from the Packet
// metadata service, posts to the phone-home URL on first boot, and persists
// the generated network config to the cloud-config network file.
func enablePacketNetwork(cfg *rancherConfig.RancherConfig) {
	// Apply the install-time config first: the metadata service is only
	// reachable once an interface with a configured address is up.
	bootStrapped := false
	for _, v := range cfg.Network.Interfaces {
		if v.Address != "" {
			if err := netconf.ApplyNetworkConfigs(&cfg.Network); err != nil {
				logrus.Errorf("Failed to bootstrap network: %v", err)
				return
			}
			bootStrapped = true
			break
		}
	}
	// No statically-addressed interface: nothing to bootstrap from.
	if !bootStrapped {
		return
	}
	c := metadata.NewClient(http.DefaultClient)
	m, err := c.Metadata.Get()
	if err != nil {
		logrus.Errorf("Failed to get Packet metadata: %v", err)
		return
	}
	// bond0 options; mode "4" with lacp_rate selects an 802.3ad-style bond
	// (see Linux bonding driver documentation).
	bondCfg := netconf.InterfaceConfig{
		Addresses: []string{},
		BondOpts: map[string]string{
			"lacp_rate": "1",
			"xmit_hash_policy": "layer3+4",
			"downdelay": "200",
			"updelay": "200",
			"miimon": "100",
			"mode": "4",
		},
	}
	netCfg := netconf.NetworkConfig{
		Interfaces: map[string]netconf.InterfaceConfig{},
	}
	// Enslave every metadata-reported interface (matched by MAC) to bond0.
	for _, iface := range m.Network.Interfaces {
		netCfg.Interfaces["mac="+iface.Mac] = netconf.InterfaceConfig{
			Bond: "bond0",
		}
	}
	for _, addr := range m.Network.Addresses {
		bondCfg.Addresses = append(bondCfg.Addresses, fmt.Sprintf("%s/%d", addr.Address, addr.Cidr))
		if addr.Gateway != "" {
			if addr.AddressFamily == 4 {
				// Only the public IPv4 gateway becomes the default route.
				if addr.Public {
					bondCfg.Gateway = addr.Gateway
				}
			} else {
				bondCfg.GatewayIpv6 = addr.Gateway
			}
		}
		// Route RFC1918 10/8 traffic via the private gateway.
		if addr.AddressFamily == 4 && strings.HasPrefix(addr.Gateway, "10.") {
			bondCfg.PostUp = append(bondCfg.PostUp, "ip route add 10.0.0.0/8 via "+addr.Gateway)
		}
	}
	netCfg.Interfaces["bond0"] = bondCfg
	b, _ := yaml.Marshal(netCfg)
	logrus.Debugf("Generated network config: %s", string(b))
	cc := rancherConfig.CloudConfig{
		Rancher: rancherConfig.RancherConfig{
			Network: netCfg,
		},
	}
	// Post to phone home URL on first boot
	// (absence of the network file is used as the first-boot indicator).
	if _, err = os.Stat(rancherConfig.CloudConfigNetworkFile); err != nil {
		if _, err = http.Post(m.PhoneHomeURL, "application/json", bytes.NewReader([]byte{})); err != nil {
			logrus.Errorf("Failed to post to Packet phone home URL: %v", err)
		}
	}
	// Persist the generated config; failures are logged but non-fatal.
	if err := os.MkdirAll(path.Dir(rancherConfig.CloudConfigNetworkFile), 0700); err != nil {
		logrus.Errorf("Failed to create directory for file %s: %v", rancherConfig.CloudConfigNetworkFile, err)
	}
	if err := rancherConfig.WriteToFile(cc, rancherConfig.CloudConfigNetworkFile); err != nil {
		logrus.Errorf("Failed to save config file %s: %v", rancherConfig.CloudConfigNetworkFile, err)
	}
}

View File

@@ -1,312 +0,0 @@
package console
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path"
"regexp"
"strings"
"syscall"
log "github.com/Sirupsen/logrus"
"github.com/rancher/os/cmd/cloudinitexecute"
"github.com/rancher/os/config"
"github.com/rancher/os/util"
)
const (
	consoleDone = "/run/console-done" // stamp written once console setup finishes
	dockerHome = "/home/docker"
	gettyCmd = "/sbin/agetty"
	rancherHome = "/home/rancher"
	startScript = "/opt/rancher/bin/start.sh"
)
// symlink pairs a link target (oldname) with the link path to create (newname).
type symlink struct {
	oldname, newname string
}
// Main sets up the RancherOS console: user home directories, optional
// password from the kernel command line, SSH host keys, respawn/getty
// configuration, sshd hardening, /etc/os-release, docker binary symlinks,
// /etc/issue, cloud-config application, and finally execs respawn as the
// long-running process. Individual failures are logged and setup continues.
func Main() {
	cfg := config.LoadConfig()
	// Ensure the rancher (1100) and docker (1101) home directories exist.
	if _, err := os.Stat(rancherHome); os.IsNotExist(err) {
		if err := os.MkdirAll(rancherHome, 0755); err != nil {
			log.Error(err)
		}
		if err := os.Chown(rancherHome, 1100, 1100); err != nil {
			log.Error(err)
		}
	}
	if _, err := os.Stat(dockerHome); os.IsNotExist(err) {
		if err := os.MkdirAll(dockerHome, 0755); err != nil {
			log.Error(err)
		}
		if err := os.Chown(dockerHome, 1101, 1101); err != nil {
			log.Error(err)
		}
	}
	// rancher.password on the kernel command line sets the rancher user's
	// password via chpasswd.
	password := config.GetCmdline("rancher.password")
	if password != "" {
		cmd := exec.Command("chpasswd")
		cmd.Stdin = strings.NewReader(fmt.Sprint("rancher:", password))
		if err := cmd.Run(); err != nil {
			log.Error(err)
		}
		// NOTE(review): the sed appears to blank the third /etc/shadow field
		// (last password change date) for the rancher entry — confirm intent.
		cmd = exec.Command("bash", "-c", `sed -E -i 's/(rancher:.*:).*(:.*:.*:.*:.*:.*:.*)$/\1\2/' /etc/shadow`)
		if err := cmd.Run(); err != nil {
			log.Error(err)
		}
	}
	if err := setupSSH(cfg); err != nil {
		log.Error(err)
	}
	if err := writeRespawn(); err != nil {
		log.Error(err)
	}
	if err := modifySshdConfig(); err != nil {
		log.Error(err)
	}
	if err := writeOsRelease(); err != nil {
		log.Error(err)
	}
	// Link the engine-provided docker binaries into /usr/bin, replacing any
	// stale links first.
	for _, link := range []symlink{
		{"/var/lib/rancher/engine/docker", "/usr/bin/docker"},
		{"/var/lib/rancher/engine/docker-containerd", "/usr/bin/docker-containerd"},
		{"/var/lib/rancher/engine/docker-containerd-ctr", "/usr/bin/docker-containerd-ctr"},
		{"/var/lib/rancher/engine/docker-containerd-shim", "/usr/bin/docker-containerd-shim"},
		{"/var/lib/rancher/engine/dockerd", "/usr/bin/dockerd"},
		{"/var/lib/rancher/engine/docker-proxy", "/usr/bin/docker-proxy"},
		{"/var/lib/rancher/engine/docker-runc", "/usr/bin/docker-runc"},
	} {
		syscall.Unlink(link.newname)
		if err := os.Symlink(link.oldname, link.newname); err != nil {
			log.Error(err)
		}
	}
	// Rewrite /etc/issue with the banner plus the current interface addresses.
	cmd := exec.Command("bash", "-c", `echo 'RancherOS \n \l' > /etc/issue`)
	if err := cmd.Run(); err != nil {
		log.Error(err)
	}
	cmd = exec.Command("bash", "-c", `echo $(/sbin/ifconfig | grep -B1 "inet addr" |awk '{ if ( $1 == "inet" ) { print $2 } else if ( $2 == "Link" ) { printf "%s:" ,$1 } }' |awk -F: '{ print $1 ": " $3}') >> /etc/issue`)
	if err := cmd.Run(); err != nil {
		log.Error(err)
	}
	cloudinitexecute.ApplyConsole(cfg)
	// Run boot-time scripts, then stamp console completion.
	if err := runScript(config.CloudConfigScriptFile); err != nil {
		log.Error(err)
	}
	if err := runScript(startScript); err != nil {
		log.Error(err)
	}
	if err := ioutil.WriteFile(consoleDone, []byte(cfg.Rancher.Console), 0644); err != nil {
		log.Error(err)
	}
	if err := runScript("/etc/rc.local"); err != nil {
		log.Error(err)
	}
	os.Setenv("TERM", "linux")
	// Replace this process with respawn; it never returns on success.
	respawnBinPath, err := exec.LookPath("respawn")
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(syscall.Exec(respawnBinPath, []string{"respawn", "-f", "/etc/respawn.conf"}, os.Environ()))
}
// generateRespawnConf renders the respawn.conf content: one getty per
// virtual terminal tty1-tty6, one getty for each serial console named on the
// kernel command line, and finally sshd. A "rancher.autologin=<tty>" token
// on the command line enables autologin for that terminal.
func generateRespawnConf(cmdline string) string {
	var respawnConf bytes.Buffer
	// appendGetty emits one agetty line, honoring the autologin token.
	appendGetty := func(tty string) {
		respawnConf.WriteString(gettyCmd)
		if strings.Contains(cmdline, fmt.Sprintf("rancher.autologin=%s", tty)) {
			respawnConf.WriteString(" --autologin rancher")
		}
		respawnConf.WriteString(fmt.Sprintf(" 115200 %s\n", tty))
	}
	for i := 1; i < 7; i++ {
		appendGetty(fmt.Sprintf("tty%d", i))
	}
	for _, tty := range []string{"ttyS0", "ttyS1", "ttyS2", "ttyS3", "ttyAMA0"} {
		if strings.Contains(cmdline, fmt.Sprintf("console=%s", tty)) {
			appendGetty(tty)
		}
	}
	respawnConf.WriteString("/usr/sbin/sshd -D")
	return respawnConf.String()
}
// writeRespawn composes /etc/respawn.conf from the kernel command line plus
// any drop-in fragments found under /etc/respawn.conf.d.
func writeRespawn() error {
	cmdline, err := ioutil.ReadFile("/proc/cmdline")
	if err != nil {
		return err
	}
	respawn := generateRespawnConf(string(cmdline))
	files, err := ioutil.ReadDir("/etc/respawn.conf.d")
	switch {
	case err == nil:
		// Append every readable fragment; skip (and log) unreadable ones.
		for _, f := range files {
			p := path.Join("/etc/respawn.conf.d", f.Name())
			content, readErr := ioutil.ReadFile(p)
			if readErr != nil {
				log.Errorf("Failed to read %s: %v", p, readErr)
				continue
			}
			respawn += fmt.Sprintf("\n%s", string(content))
		}
	case !os.IsNotExist(err):
		// A missing drop-in directory is expected; anything else is logged.
		log.Error(err)
	}
	return ioutil.WriteFile("/etc/respawn.conf", []byte(respawn), 0644)
}
// modifySshdConfig appends hardening directives to /etc/ssh/sshd_config
// unless an equivalent line already exists anywhere in the file.
func modifySshdConfig() error {
	sshdConfig, err := ioutil.ReadFile("/etc/ssh/sshd_config")
	if err != nil {
		return err
	}
	sshdConfigString := string(sshdConfig)
	for _, item := range []string{
		"UseDNS no",
		"PermitRootLogin no",
		"ServerKeyBits 2048",
		"AllowGroups docker",
	} {
		// Bug fix: without (?m), "^" only anchors at the very start of the
		// file, so directives already present past the first line were never
		// detected and got re-appended on every boot.
		match, err := regexp.Match("(?m)^"+item, sshdConfig)
		if err != nil {
			return err
		}
		if !match {
			sshdConfigString += fmt.Sprintf("%s\n", item)
		}
	}
	return ioutil.WriteFile("/etc/ssh/sshd_config", []byte(sshdConfigString), 0644)
}
// writeOsRelease rewrites /etc/os-release for RancherOS, preserving the
// previous ID_LIKE value when one is present (default "busybox").
func writeOsRelease() error {
	idLike := "busybox"
	if osRelease, err := ioutil.ReadFile("/etc/os-release"); err == nil {
		for _, line := range strings.Split(string(osRelease), "\n") {
			if strings.HasPrefix(line, "ID_LIKE") {
				// Bug fix: the old code split on "ID_LIKE" and kept the "="
				// in the value (yielding e.g. "=debian", written back as
				// "ID_LIKE==debian"). Strip the full "ID_LIKE=" prefix.
				if value := strings.TrimPrefix(line, "ID_LIKE="); value != line && value != "" {
					idLike = value
				}
			}
		}
	}
	return ioutil.WriteFile("/etc/os-release", []byte(fmt.Sprintf(`
NAME="RancherOS"
VERSION=%s
ID=rancheros
ID_LIKE=%s
VERSION_ID=%s
PRETTY_NAME="RancherOS %s"
HOME_URL=
SUPPORT_URL=
BUG_REPORT_URL=
BUILD_ID=
`, config.VERSION, idLike, config.VERSION, config.VERSION)), 0644)
}
// setupSSH ensures SSH host keys exist for each supported key type: keys
// already on disk are kept, keys saved in the config are restored, and
// otherwise new keys are generated and persisted back into the config.
func setupSSH(cfg *config.CloudConfig) error {
	for _, keyType := range []string{"rsa", "dsa", "ecdsa", "ed25519"} {
		outputFile := fmt.Sprintf("/etc/ssh/ssh_host_%s_key", keyType)
		outputFilePub := fmt.Sprintf("/etc/ssh/ssh_host_%s_key.pub", keyType)
		// Key already on disk: nothing to do.
		if _, err := os.Stat(outputFile); err == nil {
			continue
		}
		saved, savedExists := cfg.Rancher.Ssh.Keys[keyType]
		pub, pubExists := cfg.Rancher.Ssh.Keys[keyType+"-pub"]
		if savedExists && pubExists {
			// Restore the previously-generated pair from the config.
			// TODO check permissions
			if err := util.WriteFileAtomic(outputFile, []byte(saved), 0600); err != nil {
				return err
			}
			if err := util.WriteFileAtomic(outputFilePub, []byte(pub), 0600); err != nil {
				return err
			}
			continue
		}
		// Generate a fresh key pair with an empty passphrase.
		cmd := exec.Command("bash", "-c", fmt.Sprintf("ssh-keygen -f %s -N '' -t %s", outputFile, keyType))
		if err := cmd.Run(); err != nil {
			return err
		}
		savedBytes, err := ioutil.ReadFile(outputFile)
		if err != nil {
			return err
		}
		pubBytes, err := ioutil.ReadFile(outputFilePub)
		if err != nil {
			return err
		}
		// Persist the generated pair so future boots restore it.
		config.Set(fmt.Sprintf("rancher.ssh.keys.%s", keyType), string(savedBytes))
		config.Set(fmt.Sprintf("rancher.ssh.keys.%s-pub", keyType), string(pubBytes))
	}
	// NOTE(review): 0644 is an unusual mode for a directory (no execute
	// bit) — confirm /var/run/sshd is intended to be non-traversable.
	return os.MkdirAll("/var/run/sshd", 0644)
}
// runScript executes path when it exists and is executable: directly when it
// begins with a "#!" shebang, otherwise via /bin/sh. Output is forwarded to
// this process's stdout/stderr.
func runScript(path string) error {
	if !util.ExistsAndExecutable(path) {
		return nil
	}
	script, err := os.Open(path)
	if err != nil {
		return err
	}
	// Bug fix: the file handle was previously never closed (leak).
	defer script.Close()
	// Sniff the first two bytes to detect a shebang.
	magic := make([]byte, 2)
	if _, err = script.Read(magic); err != nil {
		return err
	}
	cmd := exec.Command("/bin/sh", path)
	if string(magic) == "#!" {
		cmd = exec.Command(path)
	}
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}

View File

@@ -3,7 +3,6 @@ package control
import (
"os"
log "github.com/Sirupsen/logrus"
"github.com/codegangsta/cli"
"github.com/rancher/os/config"
)
@@ -16,12 +15,6 @@ func Main() {
app.Version = config.VERSION
app.Author = "Rancher Labs, Inc."
app.EnableBashCompletion = true
app.Before = func(c *cli.Context) error {
if os.Geteuid() != 0 {
log.Fatalf("%s: Need to be root", os.Args[0])
}
return nil
}
app.Commands = []cli.Command{
{
@@ -31,12 +24,6 @@ func Main() {
HideHelp: true,
Subcommands: configSubcommands(),
},
{
Name: "console",
Usage: "manage which console container is used",
HideHelp: true,
Subcommands: consoleSubcommands(),
},
{
Name: "dev",
ShortName: "d",
@@ -45,18 +32,6 @@ func Main() {
SkipFlagParsing: true,
Action: devAction,
},
{
Name: "engine",
Usage: "manage which Docker engine is used",
HideHelp: true,
Subcommands: engineSubcommands(),
},
{
Name: "entrypoint",
HideHelp: true,
SkipFlagParsing: true,
Action: entrypointAction,
},
{
Name: "env",
ShortName: "e",
@@ -79,7 +54,6 @@ func Main() {
Subcommands: tlsConfCommands(),
},
installCommand,
selinuxCommand(),
}
app.Run(os.Args)

View File

@@ -7,14 +7,12 @@ import (
"os"
"sort"
"strings"
"text/template"
log "github.com/Sirupsen/logrus"
yaml "github.com/cloudfoundry-incubator/candiedyaml"
"github.com/codegangsta/cli"
"github.com/rancher/os/config"
"github.com/rancher/os/util"
)
func configSubcommands() []cli.Command {
@@ -29,6 +27,17 @@ func configSubcommands() []cli.Command {
Usage: "set a value",
Action: configSet,
},
{
Name: "import",
Usage: "import configuration from standard in or a file",
Action: runImport,
Flags: []cli.Flag{
cli.StringFlag{
Name: "input, i",
Usage: "File from which to read",
},
},
},
{
Name: "images",
Usage: "List Docker images for a configuration from a file",
@@ -40,12 +49,6 @@ func configSubcommands() []cli.Command {
},
},
},
{
Name: "generate",
Usage: "Generate a configuration file from a template",
Action: runGenerate,
HideHelp: true,
},
{
Name: "export",
Usage: "export configuration",
@@ -54,6 +57,10 @@ func configSubcommands() []cli.Command {
Name: "output, o",
Usage: "File to which to save",
},
cli.BoolFlag{
Name: "boot, b",
Usage: "Include cloud-config provided at boot",
},
cli.BoolFlag{
Name: "private, p",
Usage: "Include the generated private keys",
@@ -69,12 +76,6 @@ func configSubcommands() []cli.Command {
Name: "merge",
Usage: "merge configuration from stdin",
Action: merge,
Flags: []cli.Flag{
cli.StringFlag{
Name: "input, i",
Usage: "File from which to read",
},
},
},
}
}
@@ -102,7 +103,7 @@ func imagesFromConfig(cfg *config.CloudConfig) []string {
return images
}
func runImages(c *cli.Context) error {
func runImages(c *cli.Context) {
configFile := c.String("input")
cfg, err := config.ReadConfig(nil, false, configFile)
if err != nil {
@@ -110,58 +111,78 @@ func runImages(c *cli.Context) error {
}
images := imagesFromConfig(cfg)
fmt.Println(strings.Join(images, " "))
return nil
}
func runGenerate(c *cli.Context) error {
if err := genTpl(os.Stdin, os.Stdout); err != nil {
log.Fatalf("Failed to generate config, err: '%s'", err)
}
return nil
}
func runImport(c *cli.Context) {
var input io.ReadCloser
var err error
input = os.Stdin
cfg, err := config.LoadConfig()
func genTpl(in io.Reader, out io.Writer) error {
bytes, err := ioutil.ReadAll(in)
if err != nil {
log.Fatal("Could not read from stdin")
}
tpl := template.Must(template.New("osconfig").Parse(string(bytes)))
return tpl.Execute(out, env2map(os.Environ()))
}
func env2map(env []string) map[string]string {
m := make(map[string]string, len(env))
for _, s := range env {
d := strings.Split(s, "=")
m[d[0]] = d[1]
}
return m
}
func configSet(c *cli.Context) error {
key := c.Args().Get(0)
value := c.Args().Get(1)
if key == "" {
return nil
}
err := config.Set(key, value)
if err != nil {
log.Fatal(err)
}
return nil
}
func configGet(c *cli.Context) error {
arg := c.Args().Get(0)
if arg == "" {
return nil
inputFile := c.String("input")
if inputFile != "" {
input, err = os.Open(inputFile)
if err != nil {
log.Fatal(err)
}
defer input.Close()
}
val, err := config.Get(arg)
bytes, err := ioutil.ReadAll(input)
if err != nil {
log.WithFields(log.Fields{"key": arg, "val": val, "err": err}).Fatal("config get: failed to retrieve value")
log.Fatal(err)
}
cfg, err = cfg.Import(bytes)
if err != nil {
log.Fatal(err)
}
if err := cfg.Save(); err != nil {
log.Fatal(err)
}
}
func configSet(c *cli.Context) {
key := c.Args().Get(0)
value := c.Args().Get(1)
if key == "" {
return
}
cfg, err := config.LoadConfig()
if err != nil {
log.Fatal(err)
}
cfg, err = cfg.Set(key, value)
if err != nil {
log.Fatal(err)
}
if err := cfg.Save(); err != nil {
log.Fatal(err)
}
}
func configGet(c *cli.Context) {
arg := c.Args().Get(0)
if arg == "" {
return
}
cfg, err := config.LoadConfig()
if err != nil {
log.WithFields(log.Fields{"err": err}).Fatal("config get: failed to load config")
}
val, err := cfg.Get(arg)
if err != nil {
log.WithFields(log.Fields{"cfg": cfg, "key": arg, "val": val, "err": err}).Fatal("config get: failed to retrieve value")
}
printYaml := false
@@ -181,36 +202,31 @@ func configGet(c *cli.Context) error {
} else {
fmt.Println(val)
}
return nil
}
func merge(c *cli.Context) error {
input := os.Stdin
inputFile := c.String("input")
if inputFile != "" {
var err error
input, err = os.Open(inputFile)
if err != nil {
log.Fatal(err)
}
defer input.Close()
}
bytes, err := ioutil.ReadAll(input)
func merge(c *cli.Context) {
bytes, err := ioutil.ReadAll(os.Stdin)
if err != nil {
log.Fatal(err)
}
if err = config.Merge(bytes); err != nil {
cfg, err := config.LoadConfig()
if err != nil {
log.Fatal(err)
}
return nil
cfg, err = cfg.MergeBytes(bytes)
if err != nil {
log.Fatal(err)
}
if err := cfg.Save(); err != nil {
log.Fatal(err)
}
}
func export(c *cli.Context) error {
content, err := config.Export(c.Bool("private"), c.Bool("full"))
func export(c *cli.Context) {
content, err := config.Dump(c.Bool("boot"), c.Bool("private"), c.Bool("full"))
if err != nil {
log.Fatal(err)
}
@@ -219,11 +235,9 @@ func export(c *cli.Context) error {
if output == "" {
fmt.Println(content)
} else {
err := util.WriteFileAtomic(output, []byte(content), 0400)
err := ioutil.WriteFile(output, []byte(content), 0400)
if err != nil {
log.Fatal(err)
}
}
return nil
}

View File

@@ -1,56 +0,0 @@
package control
import (
"bytes"
"strings"
"testing"
"github.com/stretchr/testify/require"
"os"
)
// TestGenTpl verifies that genTpl renders the os-config template against the
// process environment: the amd64 branch keeps the acpid service, while any
// other ARCH value drops it entirely.
func TestGenTpl(t *testing.T) {
	assert := require.New(t)
	tpl := `
services:
{{if eq "amd64" .ARCH -}}
acpid:
image: rancher/os-acpid:0.x.x
labels:
io.rancher.os.scope: system
net: host
uts: host
privileged: true
volumes_from:
- command-volumes
- system-volumes
{{end -}}
all-volumes:`
	for _, tc := range []struct {
		arch     string
		expected string
	}{
		{"amd64", `
services:
acpid:
image: rancher/os-acpid:0.x.x
labels:
io.rancher.os.scope: system
net: host
uts: host
privileged: true
volumes_from:
- command-volumes
- system-volumes
all-volumes:`},
		{"arm", `
services:
all-volumes:`},
	} {
		out := &bytes.Buffer{}
		os.Setenv("ARCH", tc.arch)
		// Previously genTpl's error was discarded; a parse/render failure
		// would only surface as a confusing diff in the Equal below.
		assert.NoError(genTpl(strings.NewReader(tpl), out), tc.arch)
		assert.Equal(tc.expected, out.String(), tc.arch)
	}
}

View File

@@ -1,159 +0,0 @@
package control
import (
"bufio"
"fmt"
"io/ioutil"
"os"
"sort"
"strings"
"golang.org/x/net/context"
log "github.com/Sirupsen/logrus"
"github.com/codegangsta/cli"
composeConfig "github.com/docker/libcompose/config"
"github.com/docker/libcompose/project/options"
"github.com/rancher/os/compose"
"github.com/rancher/os/config"
"github.com/rancher/os/util/network"
)
// consoleSubcommands returns the sub-commands of `ros console`:
// switch (immediate), enable (takes effect on next reboot) and list.
func consoleSubcommands() []cli.Command {
	switchCmd := cli.Command{
		Name:   "switch",
		Usage:  "switch console without a reboot",
		Action: consoleSwitch,
		Flags: []cli.Flag{
			cli.BoolFlag{
				Name:  "force, f",
				Usage: "do not prompt for input",
			},
			cli.BoolFlag{
				Name:  "no-pull",
				Usage: "don't pull console image",
			},
		},
	}

	enableCmd := cli.Command{
		Name:   "enable",
		Usage:  "set console to be switched on next reboot",
		Action: consoleEnable,
	}

	listCmd := cli.Command{
		Name:   "list",
		Usage:  "list available consoles",
		Action: consoleList,
	}

	return []cli.Command{switchCmd, enableCmd, listCmd}
}
// consoleSwitch replaces the running console container with the requested
// one immediately, without a reboot. Interactive confirmation is requested
// unless --force is given, because the switch logs the user out.
func consoleSwitch(c *cli.Context) error {
	if len(c.Args()) != 1 {
		log.Fatal("Must specify exactly one console to switch to")
	}
	newConsole := c.Args()[0]
	cfg := config.LoadConfig()
	// Warn but continue: re-running the switch recreates the container.
	if newConsole == currentConsole() {
		log.Warnf("Console is already set to %s", newConsole)
	}
	if !c.Bool("force") {
		in := bufio.NewReader(os.Stdin)
		fmt.Println(`Switching consoles will
1. destroy the current console container
2. log you out
3. restart Docker`)
		if !yes(in, "Continue") {
			return nil
		}
	}
	// "default" ships in the base image; anything else is staged (pulled)
	// ahead of the switch unless --no-pull was given.
	if !c.Bool("no-pull") && newConsole != "default" {
		if err := compose.StageServices(cfg, newConsole); err != nil {
			return err
		}
	}
	// The actual switch runs inside a privileged helper container sharing
	// the host's net/pid namespaces and all system volumes.
	service, err := compose.CreateService(nil, "switch-console", &composeConfig.ServiceConfigV1{
		LogDriver:  "json-file",
		Privileged: true,
		Net:        "host",
		Pid:        "host",
		Image:      config.OS_BASE,
		Labels: map[string]string{
			config.SCOPE: config.SYSTEM,
		},
		Command:     []string{"/usr/bin/switch-console", newConsole},
		VolumesFrom: []string{"all-volumes"},
	})
	if err != nil {
		return err
	}
	// Remove any leftover switch-console container before starting anew.
	if err = service.Delete(context.Background(), options.Delete{}); err != nil {
		return err
	}
	if err = service.Up(context.Background(), options.Up{}); err != nil {
		return err
	}
	// Follow the helper's log output until it finishes.
	return service.Log(context.Background(), true)
}
// consoleEnable records a console to be used starting with the next reboot.
// Non-default consoles are staged (pulled) immediately so the reboot does
// not depend on the network.
func consoleEnable(c *cli.Context) error {
	args := c.Args()
	if len(args) != 1 {
		log.Fatal("Must specify exactly one console to enable")
	}
	console := args[0]

	cfg := config.LoadConfig()

	// The built-in console needs no staging.
	if console != "default" {
		if err := compose.StageServices(cfg, console); err != nil {
			return err
		}
	}

	// Persist the selection; a failed write is reported but not fatal.
	if err := config.Set("rancher.console", console); err != nil {
		log.Errorf("Failed to update 'rancher.console': %v", err)
	}

	return nil
}
// consoleList prints every known console (repository consoles plus the
// built-in "default"), sorted, tagged as current / enabled / disabled.
func consoleList(c *cli.Context) error {
	cfg := config.LoadConfig()

	consoles, err := network.GetConsoles(cfg.Rancher.Repositories.ToArray())
	if err != nil {
		return err
	}
	consoles = append(consoles, "default")
	sort.Strings(consoles)

	active := currentConsole()
	for _, console := range consoles {
		switch {
		case console == active:
			fmt.Printf("current %s\n", console)
		case console == cfg.Rancher.Console:
			fmt.Printf("enabled %s\n", console)
		default:
			fmt.Printf("disabled %s\n", console)
		}
	}

	return nil
}
// currentConsole reports the console name recorded in /run/console-done,
// or "" (after logging a warning) when that marker cannot be read.
func currentConsole() string {
	raw, err := ioutil.ReadFile("/run/console-done")
	if err != nil {
		log.Warnf("Failed to detect current console: %v", err)
		return ""
	}
	return strings.TrimSpace(string(raw))
}

View File

@@ -7,9 +7,8 @@ import (
"github.com/rancher/os/util"
)
func devAction(c *cli.Context) error {
func devAction(c *cli.Context) {
if len(c.Args()) > 0 {
fmt.Println(util.ResolveDevice(c.Args()[0]))
}
return nil
}

View File

@@ -1,136 +0,0 @@
package control
import (
"fmt"
"io/ioutil"
"sort"
"strings"
"golang.org/x/net/context"
log "github.com/Sirupsen/logrus"
"github.com/codegangsta/cli"
"github.com/docker/libcompose/project/options"
"github.com/rancher/os/compose"
"github.com/rancher/os/config"
"github.com/rancher/os/util/network"
)
const (
dockerDone = "/run/docker-done"
)
// engineSubcommands returns the sub-commands of `ros engine`:
// switch (immediate), enable (takes effect on next reboot) and list.
func engineSubcommands() []cli.Command {
	var commands []cli.Command

	commands = append(commands, cli.Command{
		Name:   "switch",
		Usage:  "switch Docker engine without a reboot",
		Action: engineSwitch,
		Flags: []cli.Flag{
			cli.BoolFlag{
				Name:  "force, f",
				Usage: "do not prompt for input",
			},
			cli.BoolFlag{
				Name:  "no-pull",
				Usage: "don't pull console image",
			},
		},
	})

	commands = append(commands, cli.Command{
		Name:   "enable",
		Usage:  "set Docker engine to be switched on next reboot",
		Action: engineEnable,
	})

	commands = append(commands, cli.Command{
		Name:   "list",
		Usage:  "list available Docker engines",
		Action: engineList,
	})

	return commands
}
// engineSwitch stops the running Docker engine service, swaps in the
// requested engine definition, brings it up, and persists the choice.
// Failures along the way are fatal because the host could otherwise be
// left with no engine running.
func engineSwitch(c *cli.Context) error {
	if len(c.Args()) != 1 {
		log.Fatal("Must specify exactly one Docker engine to switch to")
	}
	newEngine := c.Args()[0]
	cfg := config.LoadConfig()
	project, err := compose.GetProject(cfg, true, false)
	if err != nil {
		log.Fatal(err)
	}
	// Stop the "docker" service first (timeout 10 — presumably seconds;
	// confirm against the libcompose Stop signature).
	if err = project.Stop(context.Background(), 10, "docker"); err != nil {
		log.Fatal(err)
	}
	// Replace the service definition with the new engine, then start it.
	if err = compose.LoadSpecialService(project, cfg, "docker", newEngine); err != nil {
		log.Fatal(err)
	}
	if err = project.Up(context.Background(), options.Up{}, "docker"); err != nil {
		log.Fatal(err)
	}
	// Persist the selection so the same engine is used on the next boot;
	// a failed write is reported but not fatal.
	if err := config.Set("rancher.docker.engine", newEngine); err != nil {
		log.Errorf("Failed to update rancher.docker.engine: %v", err)
	}
	return nil
}
// engineEnable records a Docker engine to be used starting with the next
// reboot, staging (pulling) its service definition immediately.
func engineEnable(c *cli.Context) error {
	args := c.Args()
	if len(args) != 1 {
		log.Fatal("Must specify exactly one Docker engine to enable")
	}
	engine := args[0]

	cfg := config.LoadConfig()
	if err := compose.StageServices(cfg, engine); err != nil {
		return err
	}

	// Persist the selection; a failed write is reported but not fatal.
	if err := config.Set("rancher.docker.engine", engine); err != nil {
		log.Errorf("Failed to update 'rancher.docker.engine': %v", err)
	}

	return nil
}
// engineList prints every Docker engine known to the configured
// repositories, sorted, tagged as current / enabled / disabled.
func engineList(c *cli.Context) error {
	cfg := config.LoadConfig()

	engines, err := network.GetEngines(cfg.Rancher.Repositories.ToArray())
	if err != nil {
		return err
	}
	sort.Strings(engines)

	active := currentEngine()
	for _, engine := range engines {
		switch {
		case engine == active:
			fmt.Printf("current %s\n", engine)
		case engine == cfg.Rancher.Docker.Engine:
			fmt.Printf("enabled %s\n", engine)
		default:
			fmt.Printf("disabled %s\n", engine)
		}
	}

	return nil
}
// currentEngine reports the engine name recorded in the docker-done marker
// file, or "" (after logging a warning) when the marker cannot be read.
func currentEngine() string {
	raw, err := ioutil.ReadFile(dockerDone)
	if err != nil {
		log.Warnf("Failed to detect current Docker engine: %v", err)
		return ""
	}
	return strings.TrimSpace(string(raw))
}

View File

@@ -1,76 +0,0 @@
package control
import (
"os"
"os/exec"
"syscall"
log "github.com/Sirupsen/logrus"
"github.com/codegangsta/cli"
"golang.org/x/net/context"
"github.com/rancher/os/cmd/cloudinitexecute"
"github.com/rancher/os/config"
"github.com/rancher/os/docker"
"github.com/rancher/os/util"
)
const (
ca = "/etc/ssl/certs/ca-certificates.crt"
caBase = "/etc/ssl/certs/ca-certificates.crt.rancher"
)
// entrypointAction prepares a system container's environment before
// exec'ing its real command: rebinds the host's /dev, installs the CA
// bundle, writes container-scoped write_files entries, then replaces this
// process with os.Args[2:] (if present).
func entrypointAction(c *cli.Context) error {
	// Best-effort: make the host's device nodes visible inside the container.
	if _, err := os.Stat("/host/dev"); err == nil {
		cmd := exec.Command("mount", "--rbind", "/host/dev", "/dev")
		if err := cmd.Run(); err != nil {
			log.Errorf("Failed to mount /dev: %v", err)
		}
	}

	// Best-effort: copy the RancherOS CA bundle into place; a missing
	// source file is expected and not reported.
	if err := util.FileCopy(caBase, ca); err != nil && !os.IsNotExist(err) {
		log.Error(err)
	}

	cfg := config.LoadConfig()

	// Only resolve our container identity if at least one write_files
	// entry is container-scoped.
	shouldWriteFiles := false
	for _, file := range cfg.WriteFiles {
		if file.Container != "" {
			shouldWriteFiles = true
			break
		}
	}
	if shouldWriteFiles {
		// Previously this error was silently discarded; keep the step
		// best-effort but surface failures in the log.
		if err := writeFiles(cfg); err != nil {
			log.Errorf("Failed to write files: %v", err)
		}
	}

	// No command to exec: os.Args is ["ros", "entrypoint", cmd, args...].
	if len(os.Args) < 3 {
		return nil
	}

	binary, err := exec.LookPath(os.Args[2])
	if err != nil {
		return err
	}
	// Replace this process; on success syscall.Exec does not return.
	return syscall.Exec(binary, os.Args[2:], os.Environ())
}
// writeFiles writes the cloud-config write_files entries addressed to this
// container. The container's own name is resolved through the system Docker
// daemon; info.Name[1:] strips the leading "/" Docker prepends to names.
func writeFiles(cfg *config.CloudConfig) error {
	id, err := util.GetCurrentContainerId()
	if err != nil {
		return err
	}
	client, err := docker.NewSystemClient()
	if err != nil {
		return err
	}
	info, err := client.ContainerInspect(context.Background(), id)
	if err != nil {
		return err
	}
	// NOTE(review): presumably WriteFiles filters cfg.WriteFiles by the
	// given container name — confirm in cloudinitexecute.
	cloudinitexecute.WriteFiles(cfg, info.Name[1:])
	return nil
}

View File

@@ -11,12 +11,15 @@ import (
"github.com/rancher/os/util"
)
func envAction(c *cli.Context) error {
cfg := config.LoadConfig()
func envAction(c *cli.Context) {
cfg, err := config.LoadConfig()
if err != nil {
log.Fatal(err)
}
args := c.Args()
if len(args) == 0 {
return nil
return
}
osEnv := os.Environ()
@@ -36,6 +39,4 @@ func envAction(c *cli.Context) error {
if err := syscall.Exec(args[0], args, util.Map2KVPairs(envMap)); err != nil {
log.Fatal(err)
}
return nil
}

View File

@@ -5,7 +5,6 @@ import (
"fmt"
"os"
"os/exec"
"strings"
log "github.com/Sirupsen/logrus"
@@ -46,14 +45,10 @@ var installCommand = cli.Command{
Name: "no-reboot",
Usage: "do not reboot after install",
},
cli.StringFlag{
Name: "append, a",
Usage: "append additional kernel parameters",
},
},
}
func installAction(c *cli.Context) error {
func installAction(c *cli.Context) {
if c.Args().Present() {
log.Fatalf("invalid arguments %v", c.Args())
}
@@ -63,9 +58,12 @@ func installAction(c *cli.Context) error {
}
image := c.String("image")
cfg := config.LoadConfig()
cfg, err := config.LoadConfig()
if err != nil {
log.WithFields(log.Fields{"err": err}).Fatal("ros install: failed to load config")
}
if image == "" {
image = cfg.Rancher.Upgrade.Image + ":" + config.VERSION + config.SUFFIX
image = cfg.Rancher.Upgrade.Image + ":" + config.VERSION
}
installType := c.String("install-type")
@@ -85,18 +83,15 @@ func installAction(c *cli.Context) error {
cloudConfig = uc
}
append := strings.TrimSpace(c.String("append"))
force := c.Bool("force")
reboot := !c.Bool("no-reboot")
if err := runInstall(image, installType, cloudConfig, device, append, force, reboot); err != nil {
if err := runInstall(image, installType, cloudConfig, device, force, reboot); err != nil {
log.WithFields(log.Fields{"err": err}).Fatal("Failed to run install")
}
return nil
}
func runInstall(image, installType, cloudConfig, device, append string, force, reboot bool) error {
func runInstall(image, installType, cloudConfig, device string, force, reboot bool) error {
in := bufio.NewReader(os.Stdin)
fmt.Printf("Installing from %s\n", image)
@@ -115,14 +110,14 @@ func runInstall(image, installType, cloudConfig, device, append string, force, r
return err
}
}
cmd := exec.Command("system-docker", "run", "--net=host", "--privileged", "--volumes-from=user-volumes",
"--volumes-from=command-volumes", image, "-d", device, "-t", installType, "-c", cloudConfig, "-a", append)
cmd := exec.Command("system-docker", "run", "--net=host", "--privileged", "--volumes-from=user-volumes", image,
"-d", device, "-t", installType, "-c", cloudConfig)
cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
if err := cmd.Run(); err != nil {
return err
}
if reboot && (force || yes(in, "Continue with reboot")) {
if reboot && yes(in, "Continue with reboot") {
log.Info("Rebooting")
power.Reboot()
}

View File

@@ -9,15 +9,13 @@ import (
"os"
"strings"
"golang.org/x/net/context"
log "github.com/Sirupsen/logrus"
yaml "github.com/cloudfoundry-incubator/candiedyaml"
dockerClient "github.com/fsouza/go-dockerclient"
"github.com/codegangsta/cli"
dockerClient "github.com/docker/engine-api/client"
composeConfig "github.com/docker/libcompose/config"
"github.com/docker/libcompose/project/options"
"github.com/docker/libcompose/project"
"github.com/rancher/os/cmd/power"
"github.com/rancher/os/compose"
"github.com/rancher/os/config"
@@ -52,18 +50,6 @@ func osSubcommands() []cli.Command {
Name: "no-reboot",
Usage: "do not reboot after upgrade",
},
cli.BoolFlag{
Name: "kexec",
Usage: "reboot using kexec",
},
cli.StringFlag{
Name: "append",
Usage: "append additional kernel parameters",
},
cli.BoolFlag{
Name: "upgrade-console",
Usage: "upgrade console even if persistent",
},
},
},
{
@@ -117,7 +103,7 @@ func getImages() (*Images, error) {
return parseBody(body)
}
func osMetaDataGet(c *cli.Context) error {
func osMetaDataGet(c *cli.Context) {
images, err := getImages()
if err != nil {
log.Fatal(err)
@@ -129,15 +115,13 @@ func osMetaDataGet(c *cli.Context) error {
}
for _, image := range images.Available {
_, _, err := client.ImageInspectWithRaw(context.Background(), image, false)
if dockerClient.IsErrImageNotFound(err) {
_, err := client.InspectImage(image)
if err == dockerClient.ErrNoSuchImage {
fmt.Println(image, "remote")
} else {
fmt.Println(image, "local")
}
}
return nil
}
func getLatestImage() (string, error) {
@@ -149,7 +133,7 @@ func getLatestImage() (string, error) {
return images.Current, nil
}
func osUpgrade(c *cli.Context) error {
func osUpgrade(c *cli.Context) {
image := c.String("image")
if image == "" {
@@ -165,94 +149,67 @@ func osUpgrade(c *cli.Context) error {
if c.Args().Present() {
log.Fatalf("invalid arguments %v", c.Args())
}
if err := startUpgradeContainer(image, c.Bool("stage"), c.Bool("force"), !c.Bool("no-reboot"), c.Bool("kexec"), c.Bool("upgrade-console"), c.String("append")); err != nil {
if err := startUpgradeContainer(image, c.Bool("stage"), c.Bool("force"), !c.Bool("no-reboot")); err != nil {
log.Fatal(err)
}
}
func osVersion(c *cli.Context) {
fmt.Println(config.VERSION)
}
func yes(in *bufio.Reader, question string) bool {
fmt.Printf("%s [y/N]: ", question)
line, err := in.ReadString('\n')
if err != nil {
log.Fatal(err)
}
return nil
return strings.ToLower(line[0:1]) == "y"
}
func osVersion(c *cli.Context) error {
fmt.Println(config.VERSION)
return nil
}
func startUpgradeContainer(image string, stage, force, reboot, kexec bool, upgradeConsole bool, kernelArgs string) error {
func startUpgradeContainer(image string, stage, force, reboot bool) error {
in := bufio.NewReader(os.Stdin)
command := []string{
"-t", "rancher-upgrade",
"-r", config.VERSION,
}
if kexec {
command = append(command, "-k")
}
kernelArgs = strings.TrimSpace(kernelArgs)
if kernelArgs != "" {
command = append(command, "-a", kernelArgs)
}
if upgradeConsole {
if err := config.Set("rancher.force_console_rebuild", true); err != nil {
log.Fatal(err)
}
}
fmt.Printf("Upgrading to %s\n", image)
confirmation := "Continue"
imageSplit := strings.Split(image, ":")
if len(imageSplit) > 1 && imageSplit[1] == config.VERSION+config.SUFFIX {
confirmation = fmt.Sprintf("Already at version %s. Continue anyway", imageSplit[1])
}
if !force && !yes(in, confirmation) {
os.Exit(1)
}
container, err := compose.CreateService(nil, "os-upgrade", &composeConfig.ServiceConfigV1{
container, err := compose.CreateService(nil, "os-upgrade", &project.ServiceConfig{
LogDriver: "json-file",
Privileged: true,
Net: "host",
Pid: "host",
Image: image,
Labels: map[string]string{
Labels: project.NewSliceorMap(map[string]string{
config.SCOPE: config.SYSTEM,
},
Command: command,
}),
Command: project.NewCommand(
"-t", "rancher-upgrade",
"-r", config.VERSION,
),
})
if err != nil {
return err
}
client, err := docker.NewSystemClient()
if err != nil {
if err := container.Pull(); err != nil {
return err
}
// Only pull image if not found locally
if _, _, err := client.ImageInspectWithRaw(context.Background(), image, false); err != nil {
if err := container.Pull(context.Background()); err != nil {
return err
}
}
if !stage {
// If there is already an upgrade container, delete it
// Up() should to this, but currently does not due to a bug
if err := container.Delete(context.Background(), options.Delete{}); err != nil {
fmt.Printf("Upgrading to %s\n", image)
if !force {
if !yes(in, "Continue") {
os.Exit(1)
}
}
if err := container.Start(); err != nil {
return err
}
if err := container.Up(context.Background(), options.Up{}); err != nil {
if err := container.Log(); err != nil {
return err
}
if err := container.Log(context.Background(), true); err != nil {
return err
}
if err := container.Delete(context.Background(), options.Delete{}); err != nil {
if err := container.Up(); err != nil {
return err
}
@@ -276,6 +233,10 @@ func parseBody(body []byte) (*Images, error) {
}
func getUpgradeUrl() (string, error) {
cfg := config.LoadConfig()
cfg, err := config.LoadConfig()
if err != nil {
return "", err
}
return cfg.Rancher.Upgrade.Url, nil
}

View File

@@ -1,59 +0,0 @@
package control
import (
"fmt"
"syscall"
"github.com/codegangsta/cli"
"github.com/rancher/os/config"
)
// selinuxCommand builds the `ros selinux` CLI command, which replaces the
// current process with an interactive SELinux tools container launched via
// system-docker. The long volume list mirrors the standard system-container
// mounts so the usual RancherOS binaries and state are available inside.
func selinuxCommand() cli.Command {
	app := cli.Command{}
	app.Name = "selinux"
	app.Usage = "Launch SELinux tools container."
	app.Action = func(c *cli.Context) error {
		argv := []string{"system-docker", "run", "-it", "--privileged", "--rm",
			"--net", "host", "--pid", "host", "--ipc", "host",
			"-v", "/usr/bin/docker:/usr/bin/docker.dist:ro",
			"-v", "/usr/bin/ros:/usr/bin/dockerlaunch:ro",
			"-v", "/usr/bin/ros:/usr/bin/user-docker:ro",
			"-v", "/usr/bin/ros:/usr/bin/system-docker:ro",
			"-v", "/usr/bin/ros:/sbin/poweroff:ro",
			"-v", "/usr/bin/ros:/sbin/reboot:ro",
			"-v", "/usr/bin/ros:/sbin/halt:ro",
			"-v", "/usr/bin/ros:/sbin/shutdown:ro",
			"-v", "/usr/bin/ros:/usr/bin/respawn:ro",
			"-v", "/usr/bin/ros:/usr/bin/ros:ro",
			"-v", "/usr/bin/ros:/usr/bin/cloud-init:ro",
			"-v", "/usr/bin/ros:/usr/sbin/netconf:ro",
			"-v", "/usr/bin/ros:/usr/sbin/wait-for-network:ro",
			"-v", "/usr/bin/ros:/usr/sbin/wait-for-docker:ro",
			"-v", "/var/lib/docker:/var/lib/docker",
			"-v", "/var/lib/rkt:/var/lib/rkt",
			"-v", "/dev:/host/dev",
			"-v", "/etc/docker:/etc/docker",
			"-v", "/etc/hosts:/etc/hosts",
			"-v", "/etc/resolv.conf:/etc/resolv.conf",
			"-v", "/etc/rkt:/etc/rkt",
			"-v", "/etc/ssl/certs/ca-certificates.crt:/etc/ssl/certs/ca-certificates.crt.rancher",
			"-v", "/lib/firmware:/lib/firmware",
			"-v", "/lib/modules:/lib/modules",
			"-v", "/run:/run",
			"-v", "/usr/share/ros:/usr/share/ros",
			"-v", "/var/lib/rancher/conf:/var/lib/rancher/conf",
			"-v", "/var/lib/rancher:/var/lib/rancher",
			"-v", "/var/log:/var/log",
			"-v", "/var/run:/var/run",
			"-v", "/home:/home",
			"-v", "/opt:/opt",
			"-v", "/etc/selinux:/etc/selinux",
			"-v", "/var/lib/selinux:/var/lib/selinux",
			"-v", "/usr/share/selinux:/usr/share/selinux",
			fmt.Sprintf("%s/os-selinuxtools:%s%s", config.OS_REPO, config.VERSION, config.SUFFIX), "bash"}
		// syscall.Exec only returns on failure; previously that error was
		// silently dropped and the command exited 0 even when exec failed.
		return syscall.Exec("/bin/system-docker", argv, []string{})
	}
	return app
}

View File

@@ -11,15 +11,19 @@ import (
"github.com/docker/libcompose/project"
"github.com/rancher/os/compose"
"github.com/rancher/os/config"
"github.com/rancher/os/util/network"
"github.com/rancher/os/util"
)
type projectFactory struct {
}
func (p *projectFactory) Create(c *cli.Context) (project.APIProject, error) {
cfg := config.LoadConfig()
return compose.GetProject(cfg, true, false)
func (p *projectFactory) Create(c *cli.Context) (*project.Project, error) {
cfg, err := config.LoadConfig()
if err != nil {
return nil, err
}
return compose.GetProject(cfg, true)
}
func beforeApp(c *cli.Context) error {
@@ -82,13 +86,12 @@ func serviceSubCommands() []cli.Command {
}
}
func updateIncludedServices(cfg *config.CloudConfig) error {
return config.Set("rancher.services_include", cfg.Rancher.ServicesInclude)
}
func disable(c *cli.Context) error {
func disable(c *cli.Context) {
changed := false
cfg := config.LoadConfig()
cfg, err := config.LoadConfig()
if err != nil {
logrus.Fatal(err)
}
for _, service := range c.Args() {
if _, ok := cfg.Rancher.ServicesInclude[service]; !ok {
@@ -100,17 +103,18 @@ func disable(c *cli.Context) error {
}
if changed {
if err := updateIncludedServices(cfg); err != nil {
if err = cfg.Save(); err != nil {
logrus.Fatal(err)
}
}
return nil
}
func del(c *cli.Context) error {
func del(c *cli.Context) {
changed := false
cfg := config.LoadConfig()
cfg, err := config.LoadConfig()
if err != nil {
logrus.Fatal(err)
}
for _, service := range c.Args() {
if _, ok := cfg.Rancher.ServicesInclude[service]; !ok {
@@ -121,52 +125,51 @@ func del(c *cli.Context) error {
}
if changed {
if err := updateIncludedServices(cfg); err != nil {
if err = cfg.Save(); err != nil {
logrus.Fatal(err)
}
}
return nil
}
func enable(c *cli.Context) error {
cfg := config.LoadConfig()
var enabledServices []string
func enable(c *cli.Context) {
changed := false
cfg, err := config.LoadConfig()
if err != nil {
logrus.Fatal(err)
}
for _, service := range c.Args() {
if val, ok := cfg.Rancher.ServicesInclude[service]; !ok || !val {
if strings.HasPrefix(service, "/") && !strings.HasPrefix(service, "/var/lib/rancher/conf") {
logrus.Fatalf("ERROR: Service should be in path /var/lib/rancher/conf")
}
if _, err := compose.LoadServiceResource(service, true, cfg); err != nil {
logrus.Fatalf("could not load service %s", service)
}
cfg.Rancher.ServicesInclude[service] = true
enabledServices = append(enabledServices, service)
changed = true
}
}
if len(enabledServices) > 0 {
if err := compose.StageServices(cfg, enabledServices...); err != nil {
logrus.Fatal(err)
}
if err := updateIncludedServices(cfg); err != nil {
if changed {
if err := cfg.Save(); err != nil {
logrus.Fatal(err)
}
}
return nil
}
func list(c *cli.Context) error {
cfg := config.LoadConfig()
func list(c *cli.Context) {
cfg, err := config.LoadConfig()
if err != nil {
logrus.Fatal(err)
}
clone := make(map[string]bool)
for service, enabled := range cfg.Rancher.ServicesInclude {
clone[service] = enabled
}
services, err := network.GetServices(cfg.Rancher.Repositories.ToArray())
services, err := util.GetServices(cfg.Rancher.Repositories.ToArray())
if err != nil {
logrus.Fatalf("Failed to get services: %v", err)
}
@@ -191,6 +194,4 @@ func list(c *cli.Context) error {
fmt.Printf("disabled %s\n", service)
}
}
return nil
}

View File

@@ -10,20 +10,11 @@ import (
"github.com/codegangsta/cli"
machineUtil "github.com/docker/machine/utils"
"github.com/rancher/os/config"
"github.com/rancher/os/util"
)
const (
NAME string = "rancher"
BITS int = 2048
ServerTlsPath string = "/etc/docker/tls"
ClientTlsPath string = "/home/rancher/.docker"
Cert string = "cert.pem"
Key string = "key.pem"
ServerCert string = "server-cert.pem"
ServerKey string = "server-key.pem"
CaCert string = "ca.pem"
CaKey string = "ca-key.pem"
NAME string = "rancher"
BITS int = 2048
)
func tlsConfCommands() []cli.Command {
@@ -53,81 +44,101 @@ func tlsConfCommands() []cli.Command {
}
}
func writeCerts(generateServer bool, hostname []string, certPath, keyPath, caCertPath, caKeyPath string) error {
func writeCerts(generateServer bool, hostname []string, cfg *config.CloudConfig, certPath, keyPath, caCertPath, caKeyPath string) error {
if !generateServer {
return machineUtil.GenerateCert([]string{""}, certPath, keyPath, caCertPath, caKeyPath, NAME, BITS)
}
if err := machineUtil.GenerateCert(hostname, certPath, keyPath, caCertPath, caKeyPath, NAME, BITS); err != nil {
if cfg.Rancher.Docker.ServerKey == "" || cfg.Rancher.Docker.ServerCert == "" {
err := machineUtil.GenerateCert(hostname, certPath, keyPath, caCertPath, caKeyPath, NAME, BITS)
if err != nil {
return err
}
cert, err := ioutil.ReadFile(certPath)
if err != nil {
return err
}
key, err := ioutil.ReadFile(keyPath)
if err != nil {
return err
}
cfg, err = cfg.Merge(map[interface{}]interface{}{
"rancher": map[interface{}]interface{}{
"docker": map[interface{}]interface{}{
"server_cert": string(cert),
"server_key": string(key),
},
},
})
if err != nil {
return err
}
return cfg.Save() // certPath, keyPath are already written to by machineUtil.GenerateCert()
}
if err := ioutil.WriteFile(certPath, []byte(cfg.Rancher.Docker.ServerCert), 0400); err != nil {
return err
}
cert, err := ioutil.ReadFile(certPath)
if err != nil {
return err
}
return ioutil.WriteFile(keyPath, []byte(cfg.Rancher.Docker.ServerKey), 0400)
key, err := ioutil.ReadFile(keyPath)
if err != nil {
return err
}
// certPath, keyPath are already written to by machineUtil.GenerateCert()
if err := config.Set("rancher.docker.server_cert", string(cert)); err != nil {
return err
}
if err := config.Set("rancher.docker.server_key", string(key)); err != nil {
return err
}
return nil
}
func writeCaCerts(cfg *config.CloudConfig, caCertPath, caKeyPath string) error {
func writeCaCerts(cfg *config.CloudConfig, caCertPath, caKeyPath string) (*config.CloudConfig, error) {
if cfg.Rancher.Docker.CACert == "" {
if err := machineUtil.GenerateCACertificate(caCertPath, caKeyPath, NAME, BITS); err != nil {
return err
return nil, err
}
caCert, err := ioutil.ReadFile(caCertPath)
if err != nil {
return err
return nil, err
}
caKey, err := ioutil.ReadFile(caKeyPath)
if err != nil {
return err
return nil, err
}
// caCertPath, caKeyPath are already written to by machineUtil.GenerateCACertificate()
if err := config.Set("rancher.docker.ca_cert", string(caCert)); err != nil {
return err
}
if err := config.Set("rancher.docker.ca_key", string(caKey)); err != nil {
return err
}
} else {
cfg = config.LoadConfig()
if err := util.WriteFileAtomic(caCertPath, []byte(cfg.Rancher.Docker.CACert), 0400); err != nil {
return err
cfg, err = cfg.Merge(map[interface{}]interface{}{
"rancher": map[interface{}]interface{}{
"docker": map[interface{}]interface{}{
"ca_key": string(caKey),
"ca_cert": string(caCert),
},
},
})
if err != nil {
return nil, err
}
if err := util.WriteFileAtomic(caKeyPath, []byte(cfg.Rancher.Docker.CAKey), 0400); err != nil {
return err
if err = cfg.Save(); err != nil {
return nil, err
}
return cfg, nil // caCertPath, caKeyPath are already written to by machineUtil.GenerateCACertificate()
}
return nil
if err := ioutil.WriteFile(caCertPath, []byte(cfg.Rancher.Docker.CACert), 0400); err != nil {
return nil, err
}
if err := ioutil.WriteFile(caKeyPath, []byte(cfg.Rancher.Docker.CAKey), 0400); err != nil {
return nil, err
}
return cfg, nil
}
func tlsConfCreate(c *cli.Context) error {
func tlsConfCreate(c *cli.Context) {
err := generate(c)
if err != nil {
log.Fatal(err)
}
return nil
}
func generate(c *cli.Context) error {
@@ -139,22 +150,27 @@ func generate(c *cli.Context) error {
}
func Generate(generateServer bool, outDir string, hostnames []string) error {
cfg, err := config.LoadConfig()
if err != nil {
return err
}
if outDir == "" {
if generateServer {
outDir = ServerTlsPath
outDir = "/etc/docker/tls"
} else {
outDir = ClientTlsPath
outDir = "/home/rancher/.docker"
}
log.Infof("Out directory (-d, --dir) not specified, using default: %s", outDir)
}
caCertPath := filepath.Join(outDir, CaCert)
caKeyPath := filepath.Join(outDir, CaKey)
certPath := filepath.Join(outDir, Cert)
keyPath := filepath.Join(outDir, Key)
caCertPath := filepath.Join(outDir, "ca.pem")
caKeyPath := filepath.Join(outDir, "ca-key.pem")
certPath := filepath.Join(outDir, "cert.pem")
keyPath := filepath.Join(outDir, "key.pem")
if generateServer {
certPath = filepath.Join(outDir, ServerCert)
keyPath = filepath.Join(outDir, ServerKey)
certPath = filepath.Join(outDir, "server-cert.pem")
keyPath = filepath.Join(outDir, "server-key.pem")
}
if _, err := os.Stat(outDir); os.IsNotExist(err) {
@@ -163,11 +179,11 @@ func Generate(generateServer bool, outDir string, hostnames []string) error {
}
}
cfg := config.LoadConfig()
if err := writeCaCerts(cfg, caCertPath, caKeyPath); err != nil {
cfg, err = writeCaCerts(cfg, caCertPath, caKeyPath)
if err != nil {
return err
}
if err := writeCerts(generateServer, hostnames, certPath, keyPath, caCertPath, caKeyPath); err != nil {
if err := writeCerts(generateServer, hostnames, cfg, certPath, keyPath, caCertPath, caKeyPath); err != nil {
return err
}

View File

@@ -1,19 +0,0 @@
package control
import (
"bufio"
"fmt"
"strings"
log "github.com/Sirupsen/logrus"
)
func yes(in *bufio.Reader, question string) bool {
fmt.Printf("%s [y/N]: ", question)
line, err := in.ReadString('\n')
if err != nil {
log.Fatal(err)
}
return strings.ToLower(line[0:1]) == "y"
}

View File

@@ -1,79 +0,0 @@
package dockerinit
import (
"fmt"
"io/ioutil"
"os"
"path"
"strings"
"syscall"
"time"
log "github.com/Sirupsen/logrus"
"github.com/rancher/os/config"
"github.com/rancher/os/util"
)
const (
consoleDone = "/run/console-done"
dockerConf = "/var/lib/rancher/conf/docker"
dockerDone = "/run/docker-done"
dockerLog = "/var/log/docker.log"
)
func Main() {
for {
if _, err := os.Stat(consoleDone); err == nil {
break
}
time.Sleep(200 * time.Millisecond)
}
dockerBin := "/usr/bin/docker"
for _, binPath := range []string{
"/opt/bin",
"/usr/local/bin",
"/var/lib/rancher/docker",
} {
if util.ExistsAndExecutable(path.Join(binPath, "dockerd")) {
dockerBin = path.Join(binPath, "dockerd")
break
}
if util.ExistsAndExecutable(path.Join(binPath, "docker")) {
dockerBin = path.Join(binPath, "docker")
break
}
}
if err := syscall.Mount("", "/", "", syscall.MS_SHARED|syscall.MS_REC, ""); err != nil {
log.Error(err)
}
if err := syscall.Mount("", "/run", "", syscall.MS_SHARED|syscall.MS_REC, ""); err != nil {
log.Error(err)
}
mountInfo, err := ioutil.ReadFile("/proc/self/mountinfo")
if err != nil {
log.Fatal(err)
}
for _, mount := range strings.Split(string(mountInfo), "\n") {
if strings.Contains(mount, "/var/lib/docker /var/lib/docker") && strings.Contains(mount, "rootfs") {
os.Setenv("DOCKER_RAMDISK", "1")
}
}
args := []string{
"bash",
"-c",
fmt.Sprintf(`[ -e %s ] && source %s; exec /usr/bin/dockerlaunch %s %s $DOCKER_OPTS >> %s 2>&1`, dockerConf, dockerConf, dockerBin, strings.Join(os.Args[1:], " "), dockerLog),
}
cfg := config.LoadConfig()
if err := ioutil.WriteFile(dockerDone, []byte(cfg.Rancher.Docker.Engine), 0644); err != nil {
log.Error(err)
}
log.Fatal(syscall.Exec("/bin/bash", args, os.Environ()))
}

View File

@@ -1,82 +1,83 @@
package network
import (
"flag"
"bufio"
"fmt"
"io/ioutil"
"os"
"golang.org/x/net/context"
"os/exec"
"strings"
log "github.com/Sirupsen/logrus"
"github.com/docker/libnetwork/resolvconf"
"github.com/rancher/netconf"
"github.com/rancher/os/cmd/cloudinit"
"github.com/rancher/os/config"
"github.com/rancher/os/docker"
"github.com/rancher/os/hostname"
)
var (
stopNetworkPre bool
flags *flag.FlagSet
const (
NETWORK_DONE = "/var/run/network.done"
WAIT_FOR_NETWORK = "wait-for-network"
)
func init() {
flags = flag.NewFlagSet(os.Args[0], flag.ContinueOnError)
flags.BoolVar(&stopNetworkPre, "stop-network-pre", false, "")
func sendTerm(proc string) {
cmd := exec.Command("killall", "-TERM", proc)
cmd.Stderr = os.Stderr
cmd.Stdout = os.Stdout
cmd.Run()
}
func Main() {
flags.Parse(os.Args[1:])
log.Infof("Running network: stop-network-pre=%v", stopNetworkPre)
if stopNetworkPre {
client, err := docker.NewSystemClient()
if err != nil {
log.Error(err)
}
err = client.ContainerStop(context.Background(), "network-pre", 10)
if err != nil {
log.Error(err)
}
_, err = client.ContainerWait(context.Background(), "network-pre")
if err != nil {
log.Error(err)
}
args := os.Args
if len(args) > 1 {
fmt.Println("call " + args[0] + " to load network config from cloud-config.yml")
return
}
cfg := config.LoadConfig()
nameservers := cfg.Rancher.Network.Dns.Nameservers
search := cfg.Rancher.Network.Dns.Search
userSetDns := len(nameservers) > 0 || len(search) > 0
if !userSetDns {
nameservers = cfg.Rancher.Defaults.Network.Dns.Nameservers
search = cfg.Rancher.Defaults.Network.Dns.Search
os.Remove(NETWORK_DONE) // ignore error
cfg, err := config.LoadConfig()
if err != nil {
log.Fatal(err)
}
if _, err := resolvconf.Build("/etc/resolv.conf", nameservers, search, nil); err != nil {
log.Error(err)
}
if err := hostname.SetHostnameFromCloudConfig(cfg); err != nil {
log.Error(err)
}
hostname, _ := cloudinit.SetHostname(cfg) // ignore error
log.Infof("Network: hostname: '%s'", hostname)
if err := netconf.ApplyNetworkConfigs(&cfg.Rancher.Network); err != nil {
log.Error(err)
}
userSetHostname := cfg.Hostname != ""
if err := netconf.RunDhcp(&cfg.Rancher.Network, !userSetHostname, !userSetDns); err != nil {
log.Error(err)
hostname, _ = cloudinit.SetHostname(cfg) // ignore error
log.Infof("Network: hostname: '%s' (from DHCP, if not set by cloud-config)", hostname)
if hostname != "" {
hosts, err := os.Open("/etc/hosts")
defer hosts.Close()
if err != nil {
log.Fatal(err)
}
lines := bufio.NewScanner(hosts)
hostsContent := ""
for lines.Scan() {
line := strings.TrimSpace(lines.Text())
fields := strings.Fields(line)
if len(fields) > 0 && fields[0] == "127.0.1.1" {
hostsContent += "127.0.1.1 " + hostname + "\n"
continue
}
hostsContent += line + "\n"
}
if err := ioutil.WriteFile("/etc/hosts", []byte(hostsContent), 0600); err != nil {
log.Error(err)
}
}
if err := hostname.SyncHostname(); err != nil {
log.Error(err)
if cfg.Rancher.Network.Dns.Override {
log.WithFields(log.Fields{"nameservers": cfg.Rancher.Network.Dns.Nameservers}).Info("Override nameservers")
if _, err := resolvconf.Build("/etc/resolv.conf", cfg.Rancher.Network.Dns.Nameservers, cfg.Rancher.Network.Dns.Search, nil); err != nil {
log.Error(err)
}
}
if f, err := os.Create(NETWORK_DONE); err != nil {
log.Error(err)
} else {
f.Close()
}
sendTerm(WAIT_FOR_NETWORK)
select {}
}

View File

@@ -1,6 +1,7 @@
package power
import (
"bufio"
"errors"
"os"
"path/filepath"
@@ -8,15 +9,14 @@ import (
"strings"
"syscall"
"golang.org/x/net/context"
log "github.com/Sirupsen/logrus"
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/container"
"github.com/docker/engine-api/types/filters"
dockerClient "github.com/fsouza/go-dockerclient"
"github.com/rancher/os/docker"
"github.com/rancher/os/util"
)
const (
DOCKER_CGROUPS_FILE = "/proc/self/cgroup"
)
func runDocker(name string) error {
@@ -36,10 +36,11 @@ func runDocker(name string) error {
cmd = os.Args
}
existing, err := client.ContainerInspect(context.Background(), name)
if err == nil && existing.ID != "" {
err := client.ContainerRemove(context.Background(), types.ContainerRemoveOptions{
ContainerID: existing.ID,
exiting, err := client.InspectContainer(name)
if exiting != nil {
err := client.RemoveContainer(dockerClient.RemoveContainerOptions{
ID: exiting.ID,
Force: true,
})
if err != nil {
@@ -47,50 +48,53 @@ func runDocker(name string) error {
}
}
currentContainerId, err := util.GetCurrentContainerId()
currentContainerId, err := getCurrentContainerId()
if err != nil {
return err
}
currentContainer, err := client.ContainerInspect(context.Background(), currentContainerId)
currentContainer, err := client.InspectContainer(currentContainerId)
if err != nil {
return err
}
powerContainer, err := client.ContainerCreate(context.Background(),
&container.Config{
powerContainer, err := client.CreateContainer(dockerClient.CreateContainerOptions{
Name: name,
Config: &dockerClient.Config{
Image: currentContainer.Config.Image,
Cmd: cmd,
Env: []string{
"IN_DOCKER=true",
},
},
&container.HostConfig{
HostConfig: &dockerClient.HostConfig{
PidMode: "host",
VolumesFrom: []string{
currentContainer.ID,
},
Privileged: true,
}, nil, name)
},
})
if err != nil {
return err
}
go func() {
client.ContainerAttach(context.Background(), types.ContainerAttachOptions{
ContainerID: powerContainer.ID,
Stream: true,
Stderr: true,
Stdout: true,
client.AttachToContainer(dockerClient.AttachToContainerOptions{
Container: powerContainer.ID,
OutputStream: os.Stdout,
ErrorStream: os.Stderr,
Stderr: true,
Stdout: true,
})
}()
err = client.ContainerStart(context.Background(), powerContainer.ID)
err = client.StartContainer(powerContainer.ID, powerContainer.HostConfig)
if err != nil {
return err
}
_, err = client.ContainerWait(context.Background(), powerContainer.ID)
_, err = client.WaitContainer(powerContainer.ID)
if err != nil {
log.Fatal(err)
@@ -168,20 +172,19 @@ func shutDownContainers() error {
return err
}
filter := filters.NewArgs()
filter.Add("status", "running")
opts := types.ContainerListOptions{
All: true,
Filter: filter,
opts := dockerClient.ListContainersOptions{
All: true,
Filters: map[string][]string{
"status": {"running"},
},
}
containers, err := client.ContainerList(context.Background(), opts)
containers, err := client.ListContainers(opts)
if err != nil {
return err
}
currentContainerId, err := util.GetCurrentContainerId()
currentContainerId, err := getCurrentContainerId()
if err != nil {
return err
}
@@ -194,7 +197,7 @@ func shutDownContainers() error {
}
log.Infof("Stopping %s : %v", container.ID[:12], container.Names)
stopErr := client.ContainerStop(context.Background(), container.ID, timeout)
stopErr := client.StopContainer(container.ID, uint(timeout))
if stopErr != nil {
stopErrorStrings = append(stopErrorStrings, " ["+container.ID+"] "+stopErr.Error())
}
@@ -206,7 +209,7 @@ func shutDownContainers() error {
if container.ID == currentContainerId {
continue
}
_, waitErr := client.ContainerWait(context.Background(), container.ID)
_, waitErr := client.WaitContainer(container.ID)
if waitErr != nil {
waitErrorStrings = append(waitErrorStrings, " ["+container.ID+"] "+waitErr.Error())
}
@@ -218,3 +221,35 @@ func shutDownContainers() error {
return nil
}
func getCurrentContainerId() (string, error) {
file, err := os.Open(DOCKER_CGROUPS_FILE)
if err != nil {
return "", err
}
fileReader := bufio.NewScanner(file)
if !fileReader.Scan() {
return "", errors.New("Empty file /proc/self/cgroup")
}
line := fileReader.Text()
parts := strings.Split(line, "/")
for len(parts) != 3 {
if !fileReader.Scan() {
return "", errors.New("Found no docker cgroups")
}
line = fileReader.Text()
parts = strings.Split(line, "/")
if len(parts) == 3 {
if strings.HasSuffix(parts[1], "docker") {
break
} else {
parts = nil
}
}
}
return parts[len(parts)-1:][0], nil
}

View File

@@ -31,7 +31,7 @@ func Main() {
app.Run(os.Args)
}
func shutdown(c *cli.Context) error {
func shutdown(c *cli.Context) {
common("")
reboot := c.String("r")
poweroff := c.String("h")
@@ -41,6 +41,4 @@ func shutdown(c *cli.Context) error {
} else if poweroff == "now" {
PowerOff()
}
return nil
}

View File

@@ -48,7 +48,7 @@ func setupSigterm() {
}()
}
func run(c *cli.Context) error {
func run(c *cli.Context) {
setupSigterm()
var stream io.Reader = os.Stdin
@@ -79,7 +79,6 @@ func run(c *cli.Context) error {
}
wg.Wait()
return nil
}
func addProcess(process *os.Process) {

View File

@@ -1,45 +0,0 @@
package switchconsole
import (
"os"
log "github.com/Sirupsen/logrus"
"github.com/docker/libcompose/project/options"
"github.com/rancher/os/compose"
"github.com/rancher/os/config"
"golang.org/x/net/context"
)
func Main() {
if len(os.Args) != 2 {
log.Fatal("Must specify exactly one existing container")
}
newConsole := os.Args[1]
cfg := config.LoadConfig()
project, err := compose.GetProject(cfg, true, false)
if err != nil {
log.Fatal(err)
}
if newConsole != "default" {
if err = compose.LoadSpecialService(project, cfg, "console", newConsole); err != nil {
log.Fatal(err)
}
}
if err = config.Set("rancher.console", newConsole); err != nil {
log.Errorf("Failed to update 'rancher.console': %v", err)
}
if err = project.Up(context.Background(), options.Up{
Log: true,
}, "console"); err != nil {
log.Fatal(err)
}
if err = project.Restart(context.Background(), 10, "docker"); err != nil {
log.Errorf("Failed to restart Docker: %v", err)
}
}

View File

@@ -1,21 +1,26 @@
package systemdocker
import (
"log"
"os"
"strings"
"syscall"
"github.com/docker/docker/docker"
log "github.com/Sirupsen/logrus"
"github.com/rancher/os/config"
)
func Main() {
if os.Geteuid() != 0 {
log.Fatalf("%s: Need to be root", os.Args[0])
var newEnv []string
for _, env := range os.Environ() {
if !strings.HasPrefix(env, "DOCKER_HOST=") {
newEnv = append(newEnv, env)
}
}
if os.Getenv("DOCKER_HOST") == "" {
os.Setenv("DOCKER_HOST", config.DOCKER_SYSTEM_HOST)
}
newEnv = append(newEnv, "DOCKER_HOST="+config.DOCKER_SYSTEM_HOST)
docker.Main()
os.Args[0] = config.DOCKER_DIST_BIN
if err := syscall.Exec(os.Args[0], os.Args, newEnv); err != nil {
log.Fatal(err)
}
}

View File

@@ -1,183 +1,151 @@
package userdocker
import (
"io"
"io/ioutil"
"bufio"
"encoding/json"
"fmt"
"os"
"path"
"os/exec"
"os/signal"
"strings"
"syscall"
"time"
"golang.org/x/net/context"
"path/filepath"
log "github.com/Sirupsen/logrus"
composeClient "github.com/docker/libcompose/docker/client"
"github.com/docker/libcompose/docker"
"github.com/docker/libcompose/project"
"github.com/rancher/os/cmd/control"
"github.com/rancher/os/compose"
"github.com/rancher/os/config"
rosDocker "github.com/rancher/os/docker"
"github.com/rancher/os/util"
"github.com/opencontainers/runc/libcontainer/cgroups"
_ "github.com/opencontainers/runc/libcontainer/nsenter"
"github.com/opencontainers/runc/libcontainer/system"
)
const (
DEFAULT_STORAGE_CONTEXT = "console"
DOCKER_PID_FILE = "/var/run/docker.pid"
DOCKER_COMMAND = "docker-init"
userDocker = "user-docker"
sourceDirectory = "/engine"
destDirectory = "/var/lib/rancher/engine"
)
func Main() {
if err := copyBinaries(sourceDirectory, destDirectory); err != nil {
cfg, err := config.LoadConfig()
if err != nil {
log.Fatal(err)
}
if err := syscall.Mount("/host/sys", "/sys", "", syscall.MS_BIND|syscall.MS_REC, ""); err != nil {
log.Fatal(err)
if len(os.Args) == 1 {
if err := enter(cfg); err != nil {
log.Fatal(err)
}
} else {
if err := main(cfg); err != nil {
log.Fatal(err)
}
}
cfg := config.LoadConfig()
log.Fatal(startDocker(cfg))
}
func copyBinaries(source, dest string) error {
if err := os.MkdirAll(dest, 0755); err != nil {
return err
func enter(cfg *config.CloudConfig) error {
context := cfg.Rancher.Docker.StorageContext
if context == "" {
context = DEFAULT_STORAGE_CONTEXT
}
files, err := ioutil.ReadDir(dest)
log.Infof("Starting Docker in context: %s", context)
p, err := compose.GetProject(cfg, true)
if err != nil {
return err
}
for _, file := range files {
if err = os.RemoveAll(path.Join(dest, file.Name())); err != nil {
return err
}
}
files, err = ioutil.ReadDir(source)
pid, err := waitForPid(context, p)
if err != nil {
return err
}
for _, file := range files {
sourceFile := path.Join(source, file.Name())
destFile := path.Join(dest, file.Name())
log.Infof("%s PID %d", context, pid)
in, err := os.Open(sourceFile)
if err != nil {
return err
}
out, err := os.Create(destFile)
if err != nil {
return err
}
if _, err = io.Copy(out, in); err != nil {
return err
}
if err = out.Sync(); err != nil {
return err
}
if err = in.Close(); err != nil {
return err
}
if err = out.Close(); err != nil {
return err
}
if err := os.Chmod(destFile, 0751); err != nil {
return err
}
}
return nil
return runNsenter(pid)
}
func writeCerts(cfg *config.CloudConfig) error {
outDir := control.ServerTlsPath
if err := os.MkdirAll(outDir, 0700); err != nil {
return err
}
caCertPath := filepath.Join(outDir, control.CaCert)
caKeyPath := filepath.Join(outDir, control.CaKey)
serverCertPath := filepath.Join(outDir, control.ServerCert)
serverKeyPath := filepath.Join(outDir, control.ServerKey)
if cfg.Rancher.Docker.CACert != "" {
if err := util.WriteFileAtomic(caCertPath, []byte(cfg.Rancher.Docker.CACert), 0400); err != nil {
return err
}
if err := util.WriteFileAtomic(caKeyPath, []byte(cfg.Rancher.Docker.CAKey), 0400); err != nil {
return err
}
}
if cfg.Rancher.Docker.ServerCert != "" {
if err := util.WriteFileAtomic(serverCertPath, []byte(cfg.Rancher.Docker.ServerCert), 0400); err != nil {
return err
}
if err := util.WriteFileAtomic(serverKeyPath, []byte(cfg.Rancher.Docker.ServerKey), 0400); err != nil {
return err
}
}
return nil
type result struct {
Pid int `json:"Pid"`
}
func startDocker(cfg *config.CloudConfig) error {
storageContext := cfg.Rancher.Docker.StorageContext
if storageContext == "" {
storageContext = DEFAULT_STORAGE_CONTEXT
}
func findProgram(searchPaths ...string) string {
prog := ""
log.Infof("Starting Docker in context: %s", storageContext)
p, err := compose.GetProject(cfg, true, false)
if err != nil {
return err
}
pid, err := waitForPid(storageContext, p)
if err != nil {
return err
}
log.Infof("%s PID %d", storageContext, pid)
client, err := rosDocker.NewSystemClient()
if err != nil {
return err
}
dockerCfg := cfg.Rancher.Docker
args := dockerCfg.FullArgs()
log.Debugf("User Docker args: %v", args)
if dockerCfg.TLS {
if err := writeCerts(cfg); err != nil {
return err
for _, i := range searchPaths {
var err error
prog, err = exec.LookPath(i)
if err == nil {
break
}
prog = i
}
info, err := client.ContainerInspect(context.Background(), storageContext)
return prog
}
func runNsenter(pid int) error {
args := []string{findProgram(userDocker), "main"}
r, w, err := os.Pipe()
if err != nil {
return err
}
cmd := []string{"docker-runc", "exec", "--", info.ID, "env"}
log.Info(dockerCfg.AppendEnv())
cmd = append(cmd, dockerCfg.AppendEnv()...)
cmd = append(cmd, DOCKER_COMMAND)
cmd = append(cmd, args...)
log.Infof("Running %v", cmd)
cmd := &exec.Cmd{
Path: args[0],
Args: args,
Stdin: os.Stdin,
Stdout: os.Stdout,
Stderr: os.Stderr,
ExtraFiles: []*os.File{w},
Env: append(os.Environ(),
"_LIBCONTAINER_INITPIPE=3",
fmt.Sprintf("_LIBCONTAINER_INITPID=%d", pid),
),
}
return syscall.Exec("/usr/bin/ros", cmd, os.Environ())
if err := cmd.Start(); err != nil {
return err
}
w.Close()
var result result
if err := json.NewDecoder(r).Decode(&result); err != nil {
return err
}
if err := cmd.Wait(); err != nil {
return err
}
log.Infof("Docker PID %d", result.Pid)
p, err := os.FindProcess(result.Pid)
if err != nil {
return err
}
handleTerm(p)
if err := switchCgroup(result.Pid, pid); err != nil {
return err
}
_, err = p.Wait()
return err
}
func handleTerm(p *os.Process) {
term := make(chan os.Signal)
signal.Notify(term, syscall.SIGTERM)
go func() {
<-term
p.Signal(syscall.SIGTERM)
}()
}
func waitForPid(service string, project *project.Project) (int, error) {
@@ -198,7 +166,7 @@ func getPid(service string, project *project.Project) (int, error) {
return 0, err
}
containers, err := s.Containers(context.Background())
containers, err := s.Containers()
if err != nil {
return 0, err
}
@@ -207,7 +175,7 @@ func getPid(service string, project *project.Project) (int, error) {
return 0, nil
}
client, err := composeClient.Create(composeClient.Options{
client, err := docker.CreateClient(docker.ClientOpts{
Host: config.DOCKER_SYSTEM_HOST,
})
if err != nil {
@@ -219,8 +187,8 @@ func getPid(service string, project *project.Project) (int, error) {
return 0, err
}
info, err := client.ContainerInspect(context.Background(), id)
if err != nil || info.ID == "" {
info, err := client.InspectContainer(id)
if err != nil || info == nil {
return 0, err
}
@@ -230,3 +198,71 @@ func getPid(service string, project *project.Project) (int, error) {
return 0, nil
}
func main(cfg *config.CloudConfig) error {
os.Unsetenv("_LIBCONTAINER_INITPIPE")
os.Unsetenv("_LIBCONTAINER_INITPID")
if err := system.ParentDeathSignal(syscall.SIGKILL).Set(); err != nil {
return err
}
if err := os.Remove("/var/run/docker.pid"); err != nil && !os.IsNotExist(err) {
return err
}
dockerCfg := cfg.Rancher.Docker
args := dockerCfg.FullArgs()
log.Debugf("User Docker args: %v", args)
if dockerCfg.TLS {
log.Debug("Generating TLS certs if needed")
if err := control.Generate(true, "/etc/docker/tls", []string{"127.0.0.1", "*", "*.*", "*.*.*", "*.*.*.*"}); err != nil {
return err
}
}
prog := findProgram("docker-init", "dockerlaunch", "docker")
if strings.Contains(prog, "dockerlaunch") {
args = append([]string{prog, "docker"}, args...)
} else {
args = append([]string{prog}, args...)
}
log.Infof("Running %v", args)
return syscall.Exec(args[0], args, dockerCfg.AppendEnv())
}
func switchCgroup(src, target int) error {
cgroupFile := fmt.Sprintf("/proc/%d/cgroup", target)
f, err := os.Open(cgroupFile)
if err != nil {
return err
}
defer f.Close()
targetCgroups := map[string]string{}
s := bufio.NewScanner(f)
for s.Scan() {
text := s.Text()
parts := strings.Split(text, ":")
subparts := strings.Split(parts[1], "=")
subsystem := subparts[0]
if len(subparts) > 1 {
subsystem = subparts[1]
}
targetPath := fmt.Sprintf("/host/sys/fs/cgroup/%s%s", subsystem, parts[2])
log.Infof("Moving Docker to cgroup %s", targetPath)
targetCgroups[subsystem] = targetPath
}
if err := s.Err(); err != nil {
return err
}
return cgroups.EnterPid(targetCgroups, src)
}

View File

@@ -0,0 +1,23 @@
package waitfornetwork
import (
"github.com/rancher/os/cmd/network"
"os"
"os/signal"
"syscall"
)
func handleTerm() {
c := make(chan os.Signal, 1)
signal.Notify(c, syscall.SIGTERM)
<-c
os.Exit(0)
}
func Main() {
go handleTerm()
if _, err := os.Stat(network.NETWORK_DONE); err == nil {
os.Exit(0)
}
select {}
}

View File

@@ -1,31 +1,26 @@
package compose
import (
"fmt"
"golang.org/x/net/context"
log "github.com/Sirupsen/logrus"
yaml "github.com/cloudfoundry-incubator/candiedyaml"
"github.com/docker/libcompose/cli/logger"
composeConfig "github.com/docker/libcompose/config"
"github.com/docker/libcompose/docker"
composeClient "github.com/docker/libcompose/docker/client"
"github.com/docker/libcompose/project"
"github.com/docker/libcompose/project/events"
"github.com/docker/libcompose/project/options"
"github.com/rancher/os/config"
rosDocker "github.com/rancher/os/docker"
"github.com/rancher/os/util"
"github.com/rancher/os/util/network"
)
func CreateService(cfg *config.CloudConfig, name string, serviceConfig *composeConfig.ServiceConfigV1) (project.Service, error) {
func CreateService(cfg *config.CloudConfig, name string, serviceConfig *project.ServiceConfig) (project.Service, error) {
if cfg == nil {
cfg = config.LoadConfig()
var err error
cfg, err = config.LoadConfig()
if err != nil {
return nil, err
}
}
p, err := CreateServiceSet("once", cfg, map[string]*composeConfig.ServiceConfigV1{
p, err := RunServiceSet("once", cfg, map[string]*project.ServiceConfig{
name: serviceConfig,
})
if err != nil {
@@ -35,123 +30,51 @@ func CreateService(cfg *config.CloudConfig, name string, serviceConfig *composeC
return p.CreateService(name)
}
func CreateServiceSet(name string, cfg *config.CloudConfig, configs map[string]*composeConfig.ServiceConfigV1) (*project.Project, error) {
p, err := newProject(name, cfg, nil, nil)
func RunServiceSet(name string, cfg *config.CloudConfig, configs map[string]*project.ServiceConfig) (*project.Project, error) {
p, err := newProject(name, cfg)
if err != nil {
return nil, err
}
addServices(p, map[interface{}]interface{}{}, configs)
return p, nil
return p, p.Up()
}
func RunServiceSet(name string, cfg *config.CloudConfig, configs map[string]*composeConfig.ServiceConfigV1) (*project.Project, error) {
p, err := CreateServiceSet(name, cfg, configs)
func GetProject(cfg *config.CloudConfig, networkingAvailable bool) (*project.Project, error) {
return newCoreServiceProject(cfg, networkingAvailable)
}
func newProject(name string, cfg *config.CloudConfig) (*project.Project, error) {
clientFactory, err := rosDocker.NewClientFactory(docker.ClientOpts{})
if err != nil {
return nil, err
}
return p, p.Up(context.Background(), options.Up{
Log: cfg.Rancher.Log,
})
}
func GetProject(cfg *config.CloudConfig, networkingAvailable, loadConsole bool) (*project.Project, error) {
return newCoreServiceProject(cfg, networkingAvailable, loadConsole)
}
func newProject(name string, cfg *config.CloudConfig, environmentLookup composeConfig.EnvironmentLookup, authLookup *rosDocker.ConfigAuthLookup) (*project.Project, error) {
clientFactory, err := rosDocker.NewClientFactory(composeClient.Options{})
if err != nil {
return nil, err
}
if environmentLookup == nil {
environmentLookup = rosDocker.NewConfigEnvironment(cfg)
}
if authLookup == nil {
authLookup = rosDocker.NewConfigAuthLookup(cfg)
}
serviceFactory := &rosDocker.ServiceFactory{
Deps: map[string][]string{},
}
context := &docker.Context{
ClientFactory: clientFactory,
AuthLookup: authLookup,
Context: project.Context{
ProjectName: name,
EnvironmentLookup: environmentLookup,
NoRecreate: true, // for libcompose to not recreate on project reload, looping up the boot :)
EnvironmentLookup: rosDocker.NewConfigEnvironment(cfg),
ServiceFactory: serviceFactory,
Log: cfg.Rancher.Log,
LoggerFactory: logger.NewColorLoggerFactory(),
},
}
serviceFactory.Context = context
authLookup.SetContext(context)
return docker.NewProject(context, &composeConfig.ParseOptions{
Interpolate: true,
Validate: false,
Preprocess: preprocessServiceMap,
})
return docker.NewProject(context)
}
func preprocessServiceMap(serviceMap composeConfig.RawServiceMap) (composeConfig.RawServiceMap, error) {
newServiceMap := make(composeConfig.RawServiceMap)
for k, v := range serviceMap {
newServiceMap[k] = make(composeConfig.RawService)
for k2, v2 := range v {
if k2 == "environment" || k2 == "labels" {
newServiceMap[k][k2] = preprocess(v2, true)
} else {
newServiceMap[k][k2] = preprocess(v2, false)
}
}
}
return newServiceMap, nil
}
func preprocess(item interface{}, replaceTypes bool) interface{} {
switch typedDatas := item.(type) {
case map[interface{}]interface{}:
newMap := make(map[interface{}]interface{})
for key, value := range typedDatas {
newMap[key] = preprocess(value, replaceTypes)
}
return newMap
case []interface{}:
// newArray := make([]interface{}, 0) will cause golint to complain
var newArray []interface{}
newArray = make([]interface{}, 0)
for _, value := range typedDatas {
newArray = append(newArray, preprocess(value, replaceTypes))
}
return newArray
default:
if replaceTypes {
return fmt.Sprint(item)
}
return item
}
}
func addServices(p *project.Project, enabled map[interface{}]interface{}, configs map[string]*composeConfig.ServiceConfigV1) map[interface{}]interface{} {
serviceConfigsV2, _ := composeConfig.ConvertServices(configs)
func addServices(p *project.Project, enabled map[interface{}]interface{}, configs map[string]*project.ServiceConfig) map[interface{}]interface{} {
// Note: we ignore errors while loading services
unchanged := true
for name, serviceConfig := range serviceConfigsV2 {
hash := composeConfig.GetServiceHash(name, serviceConfig)
for name, serviceConfig := range configs {
hash := project.GetServiceHash(name, serviceConfig)
if enabled[name] == hash {
continue
@@ -184,25 +107,68 @@ func adjustContainerNames(m map[interface{}]interface{}) map[interface{}]interfa
return m
}
func newCoreServiceProject(cfg *config.CloudConfig, useNetwork, loadConsole bool) (*project.Project, error) {
environmentLookup := rosDocker.NewConfigEnvironment(cfg)
authLookup := rosDocker.NewConfigAuthLookup(cfg)
func newCoreServiceProject(cfg *config.CloudConfig, network bool) (*project.Project, error) {
projectEvents := make(chan project.Event)
enabled := map[interface{}]interface{}{}
p, err := newProject("os", cfg, environmentLookup, authLookup)
p, err := newProject("os", cfg)
if err != nil {
return nil, err
}
projectEvents := make(chan events.Event)
p.AddListener(project.NewDefaultListener(p))
p.AddListener(projectEvents)
p.ReloadCallback = projectReload(p, &useNetwork, loadConsole, environmentLookup, authLookup)
p.ReloadCallback = func() error {
var err error
cfg, err = config.LoadConfig()
if err != nil {
return err
}
enabled = addServices(p, enabled, cfg.Rancher.Services)
for service, serviceEnabled := range cfg.Rancher.ServicesInclude {
if _, ok := enabled[service]; ok || !serviceEnabled {
continue
}
bytes, err := LoadServiceResource(service, network, cfg)
if err != nil {
if err == util.ErrNoNetwork {
log.Debugf("Can not load %s, networking not enabled", service)
} else {
log.Errorf("Failed to load %s : %v", service, err)
}
continue
}
m := map[interface{}]interface{}{}
if err := yaml.Unmarshal(bytes, &m); err != nil {
log.Errorf("Failed to parse YAML configuration: %s : %v", service, err)
continue
}
bytes, err = yaml.Marshal(adjustContainerNames(config.StringifyValues(m)))
if err != nil {
log.Errorf("Failed to marshal YAML configuration: %s : %v", service, err)
continue
}
err = p.Load(bytes)
if err != nil {
log.Errorf("Failed to load %s : %v", service, err)
continue
}
enabled[service] = service
}
return nil
}
go func() {
for event := range projectEvents {
if event.EventType == events.ContainerStarted && event.ServiceName == "ntp" {
useNetwork = true
if event.EventType == project.EventContainerStarted && event.ServiceName == "network" {
network = true
}
}
}()
@@ -216,42 +182,6 @@ func newCoreServiceProject(cfg *config.CloudConfig, useNetwork, loadConsole bool
return p, nil
}
func StageServices(cfg *config.CloudConfig, services ...string) error {
p, err := newProject("stage-services", cfg, nil, nil)
if err != nil {
return err
}
for _, service := range services {
bytes, err := network.LoadServiceResource(service, true, cfg)
if err != nil {
return fmt.Errorf("Failed to load %s : %v", service, err)
}
m := map[interface{}]interface{}{}
if err := yaml.Unmarshal(bytes, &m); err != nil {
return fmt.Errorf("Failed to parse YAML configuration: %s : %v", service, err)
}
bytes, err = yaml.Marshal(m)
if err != nil {
return fmt.Errorf("Failed to marshal YAML configuration: %s : %v", service, err)
}
err = p.Load(bytes)
if err != nil {
return fmt.Errorf("Failed to load %s : %v", service, err)
}
}
// Reduce service configurations to just image and labels
for _, serviceName := range p.ServiceConfigs.Keys() {
serviceConfig, _ := p.ServiceConfigs.Get(serviceName)
p.ServiceConfigs.Add(serviceName, &composeConfig.ServiceConfig{
Image: serviceConfig.Image,
Labels: serviceConfig.Labels,
})
}
return p.Pull(context.Background())
func LoadServiceResource(name string, network bool, cfg *config.CloudConfig) ([]byte, error) {
return util.LoadResource(name, network, cfg.Rancher.Repositories.ToArray())
}

View File

@@ -1,112 +0,0 @@
package compose
import (
"fmt"
log "github.com/Sirupsen/logrus"
yaml "github.com/cloudfoundry-incubator/candiedyaml"
composeConfig "github.com/docker/libcompose/config"
"github.com/docker/libcompose/project"
"github.com/rancher/os/config"
"github.com/rancher/os/docker"
"github.com/rancher/os/util/network"
)
func LoadService(p *project.Project, cfg *config.CloudConfig, useNetwork bool, service string) error {
bytes, err := network.LoadServiceResource(service, useNetwork, cfg)
if err != nil {
return err
}
m := map[interface{}]interface{}{}
if err = yaml.Unmarshal(bytes, &m); err != nil {
return fmt.Errorf("Failed to parse YAML configuration for %s: %v", service, err)
}
m = adjustContainerNames(m)
bytes, err = yaml.Marshal(m)
if err != nil {
return fmt.Errorf("Failed to marshal YAML configuration for %s: %v", service, err)
}
if err = p.Load(bytes); err != nil {
return fmt.Errorf("Failed to load %s: %v", service, err)
}
return nil
}
func LoadSpecialService(p *project.Project, cfg *config.CloudConfig, serviceName, serviceValue string) error {
// Save config in case load fails
previousConfig, ok := p.ServiceConfigs.Get(serviceName)
p.ServiceConfigs.Add(serviceName, &composeConfig.ServiceConfig{})
if err := LoadService(p, cfg, true, serviceValue); err != nil {
// Rollback to previous config
if ok {
p.ServiceConfigs.Add(serviceName, previousConfig)
}
return err
}
return nil
}
func loadConsoleService(cfg *config.CloudConfig, p *project.Project) error {
if cfg.Rancher.Console == "" || cfg.Rancher.Console == "default" {
return nil
}
return LoadSpecialService(p, cfg, "console", cfg.Rancher.Console)
}
func loadEngineService(cfg *config.CloudConfig, p *project.Project) error {
if cfg.Rancher.Docker.Engine == "" || cfg.Rancher.Docker.Engine == cfg.Rancher.Defaults.Docker.Engine {
return nil
}
return LoadSpecialService(p, cfg, "docker", cfg.Rancher.Docker.Engine)
}
// projectReload returns a closure that re-reads the cloud config and loads
// any newly enabled include-services into project p. The enabled map
// persists across invocations, so each service is loaded at most once;
// services that fail to load stay out of the map and are retried on the
// next reload. Console and engine services are only loaded once *useNetwork
// is true.
func projectReload(p *project.Project, useNetwork *bool, loadConsole bool, environmentLookup *docker.ConfigEnvironment, authLookup *docker.ConfigAuthLookup) func() error {
	enabled := map[interface{}]interface{}{}
	return func() error {
		cfg := config.LoadConfig()

		// Keep both lookups in sync with the freshly loaded config.
		environmentLookup.SetConfig(cfg)
		authLookup.SetConfig(cfg)

		enabled = addServices(p, enabled, cfg.Rancher.Services)

		for service, serviceEnabled := range cfg.Rancher.ServicesInclude {
			// Skip services already loaded or explicitly disabled.
			if _, ok := enabled[service]; ok || !serviceEnabled {
				continue
			}

			if err := LoadService(p, cfg, *useNetwork, service); err != nil {
				// ErrNoNetwork is expected before networking is up;
				// only log other errors.
				if err != network.ErrNoNetwork {
					log.Error(err)
				}
				continue
			}

			enabled[service] = service
		}

		if !*useNetwork {
			return nil
		}

		if loadConsole {
			if err := loadConsoleService(cfg, p); err != nil {
				log.Errorf("Failed to load console: %v", err)
			}
		}

		if err := loadEngineService(cfg, p); err != nil {
			log.Errorf("Failed to load engine: %v", err)
		}

		return nil
	}
}

View File

@@ -1,37 +1,138 @@
package config
import (
"fmt"
log "github.com/Sirupsen/logrus"
yaml "github.com/cloudfoundry-incubator/candiedyaml"
"github.com/rancher/os/util"
)
func Merge(bytes []byte) error {
data, err := readConfigs(bytes, false, true)
func (c *CloudConfig) Import(bytes []byte) (*CloudConfig, error) {
data, err := readConfig(bytes, false, CloudConfigPrivateFile)
if err != nil {
return err
return c, err
}
existing, err := readConfigs(nil, false, true, CloudConfigFile)
if err != nil {
return err
}
return WriteToFile(util.Merge(existing, data), CloudConfigFile)
return NewConfig().Merge(data)
}
func Export(private, full bool) (string, error) {
rawCfg := loadRawDiskConfig(full)
if !private {
rawCfg = filterPrivateKeys(rawCfg)
func (c *CloudConfig) MergeBytes(bytes []byte) (*CloudConfig, error) {
data, err := readConfig(bytes, false)
if err != nil {
return c, err
}
return c.Merge(data)
}
// keysToStringify lists compose-config keys whose scalar values must be
// coerced to strings (e.g. numeric or boolean label values) before the
// config is handed on; StringifyValues applies the conversion under these
// keys.
var keysToStringify = []string{
	"command",
	"dns",
	"dns_search",
	"entrypoint",
	"env_file",
	"environment",
	"labels",
	"links",
}
// isPathToStringify reports whether the last element of path is a string
// key listed in keysToStringify.
func isPathToStringify(path []interface{}) bool {
	if len(path) == 0 {
		return false
	}
	last, isString := path[len(path)-1].(string)
	if !isString {
		return false
	}
	return util.Contains(keysToStringify, last)
}
// stringifyValue walks data recursively, carrying the key path taken so
// far. When the current path ends in a key from keysToStringify, scalar
// values at that level are converted with fmt.Sprint; nested maps and
// slices keep their shape and are recursed into. A new container is built
// at every level, so the input is not mutated.
func stringifyValue(data interface{}, path []interface{}) interface{} {
	switch data := data.(type) {
	case map[interface{}]interface{}:
		result := make(map[interface{}]interface{}, len(data))
		if isPathToStringify(path) {
			for k, v := range data {
				switch v := v.(type) {
				case []interface{}:
					// Container values under a stringify key are
					// still traversed, not flattened to a string.
					result[k] = stringifyValue(v, append(path, k))
				case map[interface{}]interface{}:
					result[k] = stringifyValue(v, append(path, k))
				default:
					// Scalar under a stringify key: coerce to string.
					result[k] = fmt.Sprint(v)
				}
			}
		} else {
			for k, v := range data {
				result[k] = stringifyValue(v, append(path, k))
			}
		}
		return result
	case []interface{}:
		result := make([]interface{}, len(data))
		if isPathToStringify(path) {
			for k, v := range data {
				result[k] = fmt.Sprint(v)
			}
		} else {
			for k, v := range data {
				result[k] = stringifyValue(v, append(path, k))
			}
		}
		return result
	default:
		// Scalars outside stringify keys pass through unchanged.
		return data
	}
}
// StringifyValues returns a copy of data in which scalar values found under
// keysToStringify keys have been converted to their string form.
func StringifyValues(data map[interface{}]interface{}) map[interface{}]interface{} {
	converted := stringifyValue(data, nil)
	return converted.(map[interface{}]interface{})
}
// Merge returns a new CloudConfig with values (stringified where required)
// overlaid on top of c; c itself is returned unchanged on conversion errors.
func (c *CloudConfig) Merge(values map[interface{}]interface{}) (*CloudConfig, error) {
	current := map[interface{}]interface{}{}
	if err := util.Convert(c, &current); err != nil {
		return c, err
	}

	merged := util.MapsUnion(current, StringifyValues(values))

	result := &CloudConfig{}
	if err := util.Convert(merged, result); err != nil {
		return c, err
	}
	return result, nil
}
func Dump(boot, private, full bool) (string, error) {
var cfg *CloudConfig
var err error
if full {
cfg, err = LoadConfig()
} else {
files := []string{CloudConfigBootFile, CloudConfigPrivateFile, CloudConfigFile}
if !private {
files = util.FilterStrings(files, func(x string) bool { return x != CloudConfigPrivateFile })
}
if !boot {
files = util.FilterStrings(files, func(x string) bool { return x != CloudConfigBootFile })
}
cfg, err = ChainCfgFuncs(nil,
func(_ *CloudConfig) (*CloudConfig, error) { return ReadConfig(nil, true, files...) },
amendNils,
)
}
bytes, err := yaml.Marshal(rawCfg)
if err != nil {
return "", err
}
bytes, err := yaml.Marshal(*cfg)
return string(bytes), err
}
func Get(key string) (interface{}, error) {
cfg := LoadConfig()
func (c *CloudConfig) Get(key string) (interface{}, error) {
data := map[interface{}]interface{}{}
if err := util.ConvertIgnoreOmitEmpty(cfg, &data); err != nil {
if err := util.Convert(c, &data); err != nil {
return nil, err
}
@@ -39,24 +140,46 @@ func Get(key string) (interface{}, error) {
return v, nil
}
func GetCmdline(key string) interface{} {
cmdline := readCmdline()
v, _ := getOrSetVal(key, cmdline, nil)
return v
// Set assigns value at the dotted key path within c's map form and returns
// the merged result as a new CloudConfig.
func (c *CloudConfig) Set(key string, value interface{}) (*CloudConfig, error) {
	asMap := map[interface{}]interface{}{}
	err := util.Convert(c, &asMap)
	if err != nil {
		return c, err
	}
	_, asMap = getOrSetVal(key, asMap, value)
	return c.Merge(asMap)
}
func Set(key string, value interface{}) error {
existing, err := readConfigs(nil, false, true, CloudConfigFile)
func (c *CloudConfig) Save() error {
files := append([]string{OsConfigFile, OemConfigFile}, CloudConfigDirFiles()...)
files = util.FilterStrings(files, func(x string) bool { return x != CloudConfigPrivateFile })
exCfg, err := ChainCfgFuncs(nil,
func(_ *CloudConfig) (*CloudConfig, error) {
return ReadConfig(nil, true, files...)
},
readCmdline,
amendNils)
if err != nil {
return err
}
_, modified := getOrSetVal(key, existing, value)
exCfg = mergeMetadata(exCfg, readMetadata())
c := &CloudConfig{}
if err = util.Convert(modified, c); err != nil {
exData := map[interface{}]interface{}{}
if err := util.Convert(exCfg, &exData); err != nil {
return err
}
return WriteToFile(modified, CloudConfigFile)
data := map[interface{}]interface{}{}
if err := util.Convert(c, &data); err != nil {
return err
}
data = util.MapsDifference(data, exData)
log.WithFields(log.Fields{"diff": data}).Debug("The diff we're about to save")
if err := saveToDisk(data); err != nil {
return err
}
return nil
}

View File

@@ -1,12 +1,13 @@
package config
import (
"testing"
"fmt"
yaml "github.com/cloudfoundry-incubator/candiedyaml"
"testing"
"github.com/rancher/os/util"
"github.com/stretchr/testify/require"
"strings"
)
func TestFilterKey(t *testing.T) {
@@ -54,109 +55,126 @@ func TestFilterKey(t *testing.T) {
assert.Equal(expectedRest, rest)
}
func TestUnmarshalOrReturnString(t *testing.T) {
// TestStringifyValues verifies that StringifyValues converts scalars under
// stringify keys ("command", "labels") to strings while leaving all other
// values — including []string slices and sibling string fields — untouched.
func TestStringifyValues(t *testing.T) {
	assert := require.New(t)

	data := map[interface{}]interface{}{
		"ssh_authorized_keys": []string{"pubk1", "pubk2"},
		"hostname":            "ros-test",
		"rancher": map[interface{}]interface{}{
			"services": map[interface{}]interface{}{
				"my-service": map[interface{}]interface{}{
					// Mixed scalar types that must be stringified.
					"command": []interface{}{"echo", 1, false, "nothing"},
					"labels": map[interface{}]interface{}{
						"some-bool": true,
						"some-num":  42,
					},
					"dsa-pub": "dsa-test2",
				},
			},
			"docker": map[interface{}]interface{}{
				"ca_key":  "ca_key-test3",
				"ca_cert": "ca_cert-test4",
				"args":    []string{"args_test5"},
			},
		},
	}
	expected := map[interface{}]interface{}{
		"ssh_authorized_keys": []string{"pubk1", "pubk2"},
		"hostname":            "ros-test",
		"rancher": map[interface{}]interface{}{
			"services": map[interface{}]interface{}{
				"my-service": map[interface{}]interface{}{
					"command": []interface{}{"echo", "1", "false", "nothing"},
					"labels": map[interface{}]interface{}{
						"some-bool": "true",
						"some-num":  "42",
					},
					"dsa-pub": "dsa-test2",
				},
			},
			"docker": map[interface{}]interface{}{
				"ca_key":  "ca_key-test3",
				"ca_cert": "ca_cert-test4",
				"args":    []string{"args_test5"},
			},
		},
	}

	assert.Equal(expected, StringifyValues(data))
}
func TestFilterDottedKeys(t *testing.T) {
assert := require.New(t)
assert.Equal("ab", unmarshalOrReturnString("ab"))
assert.Equal("a\nb", unmarshalOrReturnString("a\nb"))
assert.Equal("a\n", unmarshalOrReturnString("a\n"))
assert.Equal("\nb", unmarshalOrReturnString("\nb"))
assert.Equal("a,b", unmarshalOrReturnString("a,b"))
assert.Equal("a,", unmarshalOrReturnString("a,"))
assert.Equal(",b", unmarshalOrReturnString(",b"))
data := map[interface{}]interface{}{
"ssh_authorized_keys": []string{"pubk1", "pubk2"},
"hostname": "ros-test",
"rancher": map[interface{}]interface{}{
"ssh": map[interface{}]interface{}{
"keys": map[interface{}]interface{}{
"dsa": "dsa-test1",
"dsa-pub": "dsa-test2",
},
},
"docker": map[interface{}]interface{}{
"ca_key": "ca_key-test3",
"ca_cert": "ca_cert-test4",
"args": []string{"args_test5"},
},
},
}
expectedFiltered := map[interface{}]interface{}{
"ssh_authorized_keys": []string{"pubk1", "pubk2"},
"rancher": map[interface{}]interface{}{
"ssh": map[interface{}]interface{}{
"keys": map[interface{}]interface{}{
"dsa": "dsa-test1",
"dsa-pub": "dsa-test2",
},
},
},
}
expectedRest := map[interface{}]interface{}{
"hostname": "ros-test",
"rancher": map[interface{}]interface{}{
"docker": map[interface{}]interface{}{
"ca_key": "ca_key-test3",
"ca_cert": "ca_cert-test4",
"args": []string{"args_test5"},
},
},
}
assert.Equal(int64(10), unmarshalOrReturnString("10"))
assert.Equal(true, unmarshalOrReturnString("true"))
assert.Equal(false, unmarshalOrReturnString("false"))
assert.Equal([]string{"rancher", "ssh"}, strings.Split("rancher.ssh", "."))
assert.Equal([]string{"ssh_authorized_keys"}, strings.Split("ssh_authorized_keys", "."))
assert.Equal([]interface{}{"a"}, unmarshalOrReturnString("[a]"))
assert.Equal([]interface{}{"a"}, unmarshalOrReturnString("[\"a\"]"))
filtered, rest := filterDottedKeys(data, []string{"ssh_authorized_keys", "rancher.ssh"})
assert.Equal([]interface{}{"a,"}, unmarshalOrReturnString("[\"a,\"]"))
assert.Equal([]interface{}{" a, "}, unmarshalOrReturnString("[\" a, \"]"))
assert.Equal([]interface{}{",a"}, unmarshalOrReturnString("[\",a\"]"))
assert.Equal([]interface{}{" ,a "}, unmarshalOrReturnString("[\" ,a \"]"))
assert.Equal([]interface{}{"a\n"}, unmarshalOrReturnString("[\"a\n\"]"))
assert.Equal([]interface{}{" a\n "}, unmarshalOrReturnString("[\" a\n \"]"))
assert.Equal([]interface{}{"\na"}, unmarshalOrReturnString("[\"\na\"]"))
assert.Equal([]interface{}{" \na "}, unmarshalOrReturnString("[\" \na \"]"))
assert.Equal([]interface{}{"a", "b"}, unmarshalOrReturnString("[a,b]"))
assert.Equal([]interface{}{"a", "b"}, unmarshalOrReturnString("[\"a\",\"b\"]"))
assert.Equal([]interface{}{"a,", "b"}, unmarshalOrReturnString("[\"a,\",b]"))
assert.Equal([]interface{}{"a", ",b"}, unmarshalOrReturnString("[a,\",b\"]"))
assert.Equal([]interface{}{" a, ", " ,b "}, unmarshalOrReturnString("[\" a, \",\" ,b \"]"))
assert.Equal([]interface{}{"a\n", "b"}, unmarshalOrReturnString("[\"a\n\",b]"))
assert.Equal([]interface{}{"a", "\nb"}, unmarshalOrReturnString("[a,\"\nb\"]"))
assert.Equal([]interface{}{" a\n ", " \nb "}, unmarshalOrReturnString("[\" a\n \",\" \nb \"]"))
assert.Equal([]interface{}{"a", int64(10)}, unmarshalOrReturnString("[a,10]"))
assert.Equal([]interface{}{int64(10), "a"}, unmarshalOrReturnString("[10,a]"))
assert.Equal([]interface{}{"a", true}, unmarshalOrReturnString("[a,true]"))
assert.Equal([]interface{}{false, "a"}, unmarshalOrReturnString("[false,a]"))
assert.Equal(expectedFiltered, filtered)
assert.Equal(expectedRest, rest)
}
func TestParseCmdline(t *testing.T) {
assert := require.New(t)
assert.Equal(map[interface{}]interface{}{
expected := map[interface{}]interface{}{
"rancher": map[interface{}]interface{}{
"key1": "value1",
"key2": "value2",
"rescue": true,
"key1": "value1",
"key2": "value2",
"keyArray": []string{"1", "2"},
"obj1": map[interface{}]interface{}{
"key3": "3value",
"obj2": map[interface{}]interface{}{
"key4": true,
},
},
"key5": 5,
},
}, parseCmdline("a b rancher.key1=value1 c rancher.key2=value2"))
}
assert.Equal(map[interface{}]interface{}{
"rancher": map[interface{}]interface{}{
"key": "a,b",
},
}, parseCmdline("rancher.key=a,b"))
actual := parseCmdline("a b rancher.rescue rancher.keyArray=[1,2] rancher.key1=value1 c rancher.key2=value2 rancher.obj1.key3=3value rancher.obj1.obj2.key4 rancher.key5=5")
assert.Equal(map[interface{}]interface{}{
"rancher": map[interface{}]interface{}{
"key": "a\nb",
},
}, parseCmdline("rancher.key=a\nb"))
assert.Equal(map[interface{}]interface{}{
"rancher": map[interface{}]interface{}{
"key": "a:b",
},
}, parseCmdline("rancher.key=a:b"))
assert.Equal(map[interface{}]interface{}{
"rancher": map[interface{}]interface{}{
"key": int64(5),
},
}, parseCmdline("rancher.key=5"))
assert.Equal(map[interface{}]interface{}{
"rancher": map[interface{}]interface{}{
"rescue": true,
},
}, parseCmdline("rancher.rescue"))
assert.Equal(map[interface{}]interface{}{
"rancher": map[interface{}]interface{}{
"keyArray": []interface{}{int64(1), int64(2)},
},
}, parseCmdline("rancher.keyArray=[1,2]"))
assert.Equal(map[interface{}]interface{}{
"rancher": map[interface{}]interface{}{
"strArray": []interface{}{"url:http://192.168.1.100/cloud-config"},
},
}, parseCmdline("rancher.strArray=[\"url:http://192.168.1.100/cloud-config\"]"))
assert.Equal(map[interface{}]interface{}{
"rancher": map[interface{}]interface{}{
"strArray": []interface{}{"url:http://192.168.1.100/cloud-config"},
},
}, parseCmdline("rancher.strArray=[url:http://192.168.1.100/cloud-config]"))
assert.Equal(expected, actual)
}
func TestGet(t *testing.T) {
@@ -235,12 +253,12 @@ func TestSet(t *testing.T) {
}
type OuterData struct {
One Data `yaml:"one"`
One Data `"yaml:one"`
}
type Data struct {
Two bool `yaml:"two"`
Three bool `yaml:"three"`
Two bool `"yaml:two"`
Three bool `"yaml:three"`
}
func TestMapMerge(t *testing.T) {
@@ -292,6 +310,8 @@ func TestUserDocker(t *testing.T) {
err = util.Convert(config, &data)
assert.Nil(err)
fmt.Println(data)
val, ok := data["rancher"].(map[interface{}]interface{})["docker"]
assert.True(ok)

View File

@@ -2,11 +2,11 @@ package config
import (
log "github.com/Sirupsen/logrus"
yaml "github.com/cloudfoundry-incubator/candiedyaml"
"strings"
"github.com/rancher/os/util"
"regexp"
"strconv"
"strings"
)
type CfgFunc func(*CloudConfig) (*CloudConfig, error)
@@ -59,12 +59,17 @@ func filterKey(data map[interface{}]interface{}, key []string) (filtered, rest m
return
}
func filterPrivateKeys(data map[interface{}]interface{}) map[interface{}]interface{} {
for _, privateKey := range PrivateKeys {
_, data = filterKey(data, strings.Split(privateKey, "."))
func filterDottedKeys(data map[interface{}]interface{}, keys []string) (filtered, rest map[interface{}]interface{}) {
filtered = map[interface{}]interface{}{}
rest = util.MapCopy(data)
for _, key := range keys {
f, r := filterKey(data, strings.Split(key, "."))
filtered = util.MapsUnion(filtered, f)
rest = util.MapsIntersection(rest, r)
}
return data
return
}
func getOrSetVal(args string, data map[interface{}]interface{}, value interface{}) (interface{}, map[interface{}]interface{}) {
@@ -82,7 +87,7 @@ func getOrSetVal(args string, data map[interface{}]interface{}, value interface{
// Reached end, set the value
if last && value != nil {
if s, ok := value.(string); ok {
value = unmarshalOrReturnString(s)
value = DummyMarshall(s)
}
t[part] = value
@@ -116,42 +121,28 @@ func getOrSetVal(args string, data map[interface{}]interface{}, value interface{
return "", tData
}
// Replace newlines and colons with random strings
// This is done to avoid YAML treating these as special characters
var (
newlineMagicString = "9XsJcx6dR5EERYCC"
colonMagicString = "V0Rc21pIVknMm2rr"
)
func reverseReplacement(result interface{}) interface{} {
switch val := result.(type) {
case map[interface{}]interface{}:
for k, v := range val {
val[k] = reverseReplacement(v)
func DummyMarshall(value string) interface{} {
if strings.HasPrefix(value, "[") && strings.HasSuffix(value, "]") {
result := []interface{}{}
for _, i := range strings.Split(value[1:len(value)-1], ",") {
result = append(result, strings.TrimSpace(i))
}
return val
case []interface{}:
for i, item := range val {
val[i] = reverseReplacement(item)
}
return val
case string:
val = strings.Replace(val, newlineMagicString, "\n", -1)
val = strings.Replace(val, colonMagicString, ":", -1)
return val
return result
}
return result
}
func unmarshalOrReturnString(value string) (result interface{}) {
value = strings.Replace(value, "\n", newlineMagicString, -1)
value = strings.Replace(value, ":", colonMagicString, -1)
if err := yaml.Unmarshal([]byte(value), &result); err != nil {
result = value
if value == "true" {
return true
} else if value == "false" {
return false
} else if ok, _ := regexp.MatchString("^[0-9]+$", value); ok {
i, err := strconv.Atoi(value)
if err != nil {
panic(err)
}
return i
}
result = reverseReplacement(result)
return
return value
}
func parseCmdline(cmdLine string) map[interface{}]interface{} {
@@ -176,7 +167,7 @@ outer:
keys := strings.Split(kv[0], ".")
for i, key := range keys {
if i == len(keys)-1 {
current[key] = unmarshalOrReturnString(value)
current[key] = DummyMarshall(value)
} else {
if obj, ok := current[key]; ok {
if newCurrent, ok := obj.(map[interface{}]interface{}); ok {

View File

@@ -11,60 +11,63 @@ import (
yaml "github.com/cloudfoundry-incubator/candiedyaml"
"github.com/coreos/coreos-cloudinit/datasource"
"github.com/coreos/coreos-cloudinit/initialize"
"github.com/docker/engine-api/types"
composeConfig "github.com/docker/libcompose/config"
"github.com/docker/libcompose/project"
"github.com/rancher/os/util"
)
var osConfig *CloudConfig
// NewConfig returns a copy of the base OS configuration (os-config.yml
// overlaid with the OEM config), parsing it on first use and caching the
// result in the package-level osConfig variable.
func NewConfig() *CloudConfig {
	if osConfig == nil {
		osConfig, _ = ReadConfig(nil, true, OsConfigFile, OemConfigFile)
	}
	// Shallow struct copy so callers cannot replace the cached struct's
	// top-level fields. NOTE(review): nested maps/slices are still shared
	// with the cache — confirm callers treat the result as read-only at
	// depth.
	newCfg := *osConfig
	return &newCfg
}
func ReadConfig(bytes []byte, substituteMetadataVars bool, files ...string) (*CloudConfig, error) {
data, err := readConfigs(bytes, substituteMetadataVars, true, files...)
if data, err := readConfig(bytes, substituteMetadataVars, files...); err == nil {
c := &CloudConfig{}
if err := util.Convert(data, c); err != nil {
return nil, err
}
c, _ = amendNils(c)
c, _ = amendContainerNames(c)
return c, nil
} else {
return nil, err
}
}
func LoadConfig() (*CloudConfig, error) {
cfg, err := ChainCfgFuncs(NewConfig(),
readFilesAndMetadata,
readCmdline,
amendNils,
amendContainerNames)
if err != nil {
log.WithFields(log.Fields{"cfg": cfg, "err": err}).Error("Failed to load config")
return nil, err
}
c := &CloudConfig{}
if err := util.Convert(data, c); err != nil {
return nil, err
}
c = amendNils(c)
c = amendContainerNames(c)
return c, nil
}
log.Debug("Merging cloud-config from meta-data and user-data")
cfg = mergeMetadata(cfg, readMetadata())
func loadRawDiskConfig(full bool) map[interface{}]interface{} {
var rawCfg map[interface{}]interface{}
if full {
rawCfg, _ = readConfigs(nil, true, false, OsConfigFile, OemConfigFile)
if cfg.Rancher.Debug {
log.SetLevel(log.DebugLevel)
if !util.Contains(cfg.Rancher.Docker.Args, "-D") {
cfg.Rancher.Docker.Args = append(cfg.Rancher.Docker.Args, "-D")
}
if !util.Contains(cfg.Rancher.SystemDocker.Args, "-D") {
cfg.Rancher.SystemDocker.Args = append(cfg.Rancher.SystemDocker.Args, "-D")
}
}
files := append(CloudConfigDirFiles(), CloudConfigFile)
additionalCfgs, _ := readConfigs(nil, true, false, files...)
return util.Merge(rawCfg, additionalCfgs)
}
func loadRawConfig() map[interface{}]interface{} {
rawCfg := loadRawDiskConfig(true)
rawCfg = util.Merge(rawCfg, readCmdline())
rawCfg = applyDebugFlags(rawCfg)
return mergeMetadata(rawCfg, readMetadata())
}
func LoadConfig() *CloudConfig {
rawCfg := loadRawConfig()
cfg := &CloudConfig{}
if err := util.Convert(rawCfg, cfg); err != nil {
log.Errorf("Failed to parse configuration: %s", err)
return &CloudConfig{}
}
cfg = amendNils(cfg)
cfg = amendContainerNames(cfg)
return cfg
return cfg, nil
}
func CloudConfigDirFiles() []string {
files, err := ioutil.ReadDir(CloudConfigDir)
files, err := util.DirLs(CloudConfigDir)
if err != nil {
if os.IsNotExist(err) {
// do nothing
@@ -75,54 +78,36 @@ func CloudConfigDirFiles() []string {
return []string{}
}
var finalFiles []string
for _, file := range files {
if !file.IsDir() && !strings.HasPrefix(file.Name(), ".") {
finalFiles = append(finalFiles, path.Join(CloudConfigDir, file.Name()))
files = util.Filter(files, func(x interface{}) bool {
f := x.(os.FileInfo)
if f.IsDir() || strings.HasPrefix(f.Name(), ".") {
return false
}
}
return true
})
return finalFiles
}
func applyDebugFlags(rawCfg map[interface{}]interface{}) map[interface{}]interface{} {
cfg := &CloudConfig{}
if err := util.Convert(rawCfg, cfg); err != nil {
return rawCfg
}
if !cfg.Rancher.Debug {
return rawCfg
}
log.SetLevel(log.DebugLevel)
_, rawCfg = getOrSetVal("rancher.docker.debug", rawCfg, true)
_, rawCfg = getOrSetVal("rancher.system_docker.debug", rawCfg, true)
_, rawCfg = getOrSetVal("rancher.bootstrap_docker.debug", rawCfg, true)
_, rawCfg = getOrSetVal("rancher.log", rawCfg, true)
return rawCfg
return util.ToStrings(util.Map(files, func(x interface{}) interface{} {
return path.Join(CloudConfigDir, x.(os.FileInfo).Name())
}))
}
// mergeMetadata merges certain options from md (meta-data from the datasource)
// onto cc (a CloudConfig derived from user-data), if they are not already set
// on cc (i.e. user-data always takes precedence)
func mergeMetadata(rawCfg map[interface{}]interface{}, md datasource.Metadata) map[interface{}]interface{} {
if rawCfg == nil {
return nil
}
out := util.MapCopy(rawCfg)
outHostname, ok := out["hostname"]
if !ok {
outHostname = ""
func mergeMetadata(cc *CloudConfig, md datasource.Metadata) *CloudConfig {
if cc == nil {
return cc
}
out := cc
dirty := false
if md.Hostname != "" {
if outHostname != "" {
log.Debugf("Warning: user-data hostname (%s) overrides metadata hostname (%s)\n", outHostname, md.Hostname)
if out.Hostname != "" {
log.Debugf("Warning: user-data hostname (%s) overrides metadata hostname (%s)\n", out.Hostname, md.Hostname)
} else {
out["hostname"] = md.Hostname
out = &(*cc)
dirty = true
out.Hostname = md.Hostname
}
}
@@ -134,13 +119,14 @@ func mergeMetadata(rawCfg map[interface{}]interface{}, md datasource.Metadata) m
sort.Sort(sort.StringSlice(keys))
finalKeys, _ := out["ssh_authorized_keys"].([]interface{})
for _, k := range keys {
finalKeys = append(finalKeys, md.SSHPublicKeys[k])
if !dirty {
out = &(*cc)
dirty = true
}
out.SSHAuthorizedKeys = append(out.SSHAuthorizedKeys, md.SSHPublicKeys[k])
}
out["ssh_authorized_keys"] = finalKeys
return out
}
@@ -152,53 +138,68 @@ func readMetadata() datasource.Metadata {
return metadata
}
func readCmdline() map[interface{}]interface{} {
// readFilesAndMetadata reads the cloud-config.d directory files plus the
// main cloud-config file and merges their contents into c, returning the
// merged config. On failure the original c is returned with the error.
func readFilesAndMetadata(c *CloudConfig) (*CloudConfig, error) {
	files := append(CloudConfigDirFiles(), CloudConfigFile)

	data, err := readConfig(nil, true, files...)
	if err != nil {
		log.WithFields(log.Fields{"err": err, "files": files}).Error("Error reading config files")
		return c, err
	}

	merged, err := c.Merge(data)
	if err != nil {
		log.WithFields(log.Fields{"cfg": c, "data": data, "err": err}).Error("Error merging config data")
		return c, err
	}

	return merged, nil
}
func readCmdline(c *CloudConfig) (*CloudConfig, error) {
log.Debug("Reading config cmdline")
cmdLine, err := ioutil.ReadFile("/proc/cmdline")
if err != nil {
log.WithFields(log.Fields{"err": err}).Error("Failed to read kernel params")
return nil
return c, err
}
if len(cmdLine) == 0 {
return nil
return c, nil
}
log.Debugf("Config cmdline %s", cmdLine)
cmdLineObj := parseCmdline(strings.TrimSpace(util.UnescapeKernelParams(string(cmdLine))))
cmdLineObj := parseCmdline(strings.TrimSpace(string(cmdLine)))
return cmdLineObj
t, err := c.Merge(cmdLineObj)
if err != nil {
log.WithFields(log.Fields{"cfg": c, "cmdLine": cmdLine, "data": cmdLineObj, "err": err}).Warn("Error adding kernel params to config")
}
return t, nil
}
func amendNils(c *CloudConfig) *CloudConfig {
func amendNils(c *CloudConfig) (*CloudConfig, error) {
t := *c
if t.Rancher.Environment == nil {
t.Rancher.Environment = map[string]string{}
}
if t.Rancher.Autoformat == nil {
t.Rancher.Autoformat = map[string]*composeConfig.ServiceConfigV1{}
t.Rancher.Autoformat = map[string]*project.ServiceConfig{}
}
if t.Rancher.BootstrapContainers == nil {
t.Rancher.BootstrapContainers = map[string]*composeConfig.ServiceConfigV1{}
t.Rancher.BootstrapContainers = map[string]*project.ServiceConfig{}
}
if t.Rancher.Services == nil {
t.Rancher.Services = map[string]*composeConfig.ServiceConfigV1{}
t.Rancher.Services = map[string]*project.ServiceConfig{}
}
if t.Rancher.ServicesInclude == nil {
t.Rancher.ServicesInclude = map[string]bool{}
}
if t.Rancher.RegistryAuths == nil {
t.Rancher.RegistryAuths = map[string]types.AuthConfig{}
}
if t.Rancher.Sysctl == nil {
t.Rancher.Sysctl = map[string]string{}
}
return &t
return &t, nil
}
func amendContainerNames(c *CloudConfig) *CloudConfig {
for _, scm := range []map[string]*composeConfig.ServiceConfigV1{
func amendContainerNames(c *CloudConfig) (*CloudConfig, error) {
for _, scm := range []map[string]*project.ServiceConfig{
c.Rancher.Autoformat,
c.Rancher.BootstrapContainers,
c.Rancher.Services,
@@ -207,7 +208,7 @@ func amendContainerNames(c *CloudConfig) *CloudConfig {
v.ContainerName = k
}
}
return c
return c, nil
}
func WriteToFile(data interface{}, filename string) error {
@@ -216,10 +217,27 @@ func WriteToFile(data interface{}, filename string) error {
return err
}
return util.WriteFileAtomic(filename, content, 400)
return ioutil.WriteFile(filename, content, 400)
}
func readConfigs(bytes []byte, substituteMetadataVars, returnErr bool, files ...string) (map[interface{}]interface{}, error) {
// saveToDisk splits data into its private portion (SSH keys and Docker TLS
// material) and everything else, writing the public part to the main
// cloud-config file and the private part to the private file.
func saveToDisk(data map[interface{}]interface{}) error {
	privatePaths := []string{
		"rancher.ssh",
		"rancher.docker.ca_key",
		"rancher.docker.ca_cert",
		"rancher.docker.server_key",
		"rancher.docker.server_cert",
	}
	private, config := filterDottedKeys(data, privatePaths)

	if err := WriteToFile(config, CloudConfigFile); err != nil {
		return err
	}
	return WriteToFile(private, CloudConfigPrivateFile)
}
func readConfig(bytes []byte, substituteMetadataVars bool, files ...string) (map[interface{}]interface{}, error) {
// You can't just overlay yaml bytes on to maps, it won't merge, but instead
// just override the keys and not merge the map values.
left := make(map[interface{}]interface{})
@@ -227,11 +245,7 @@ func readConfigs(bytes []byte, substituteMetadataVars, returnErr bool, files ...
for _, file := range files {
content, err := readConfigFile(file)
if err != nil {
if returnErr {
return nil, err
}
log.Errorf("Failed to read config file %s: %s", file, err)
continue
return nil, err
}
if len(content) == 0 {
continue
@@ -243,53 +257,24 @@ func readConfigs(bytes []byte, substituteMetadataVars, returnErr bool, files ...
right := make(map[interface{}]interface{})
err = yaml.Unmarshal(content, &right)
if err != nil {
if returnErr {
return nil, err
}
log.Errorf("Failed to parse config file %s: %s", file, err)
continue
}
// Verify there are no issues converting to CloudConfig
c := &CloudConfig{}
if err := util.Convert(right, c); err != nil {
if returnErr {
return nil, err
}
log.Errorf("Failed to parse config file %s: %s", file, err)
continue
}
left = util.Merge(left, right)
}
if bytes == nil || len(bytes) == 0 {
return left, nil
}
right := make(map[interface{}]interface{})
if substituteMetadataVars {
bytes = substituteVars(bytes, metadata)
}
if err := yaml.Unmarshal(bytes, &right); err != nil {
if returnErr {
return nil, err
}
log.Errorf("Failed to parse bytes: %s", err)
return left, nil
left = util.MapsUnion(left, right)
}
c := &CloudConfig{}
if err := util.Convert(right, c); err != nil {
if returnErr {
if bytes != nil && len(bytes) > 0 {
right := make(map[interface{}]interface{})
if substituteMetadataVars {
bytes = substituteVars(bytes, metadata)
}
if err := yaml.Unmarshal(bytes, &right); err != nil {
return nil, err
}
log.Errorf("Failed to parse bytes: %s", err)
return left, nil
left = util.MapsUnion(left, right)
}
left = util.Merge(left, right)
return left, nil
}

View File

@@ -1,52 +1,17 @@
package config
import (
"fmt"
"os"
"github.com/fatih/structs"
)
import "os"
func (d *DockerConfig) FullArgs() []string {
args := []string{"daemon"}
args = append(args, generateEngineOptsSlice(d.EngineOpts)...)
args = append(args, d.ExtraArgs...)
args := append(d.Args, d.ExtraArgs...)
if d.TLS {
args = append(args, d.TLSArgs...)
}
return args
}
// AppendEnv returns the current process environment extended with the
// Docker-specific environment entries from the config.
func (d *DockerConfig) AppendEnv() []string {
	env := os.Environ()
	return append(env, d.Environment...)
}
// generateEngineOptsSlice renders an EngineOpts struct as daemon CLI
// arguments using each field's `opt` struct tag: non-empty strings become
// "--opt" followed by the value, *bool becomes "--opt" (true) or
// "--opt=false", and map fields expand to repeated "--opt k=v" pairs.
// Zero-value/nil fields produce no arguments. NOTE(review): iteration over
// optsStruct.Map() gives no fixed argument order — confirm callers do not
// depend on ordering.
func generateEngineOptsSlice(opts EngineOpts) []string {
	optsStruct := structs.New(opts)
	var optsSlice []string
	for k, v := range optsStruct.Map() {
		// The `opt` tag carries the daemon flag name for this field.
		optTag := optsStruct.Field(k).Tag("opt")
		switch value := v.(type) {
		case string:
			if value != "" {
				optsSlice = append(optsSlice, fmt.Sprintf("--%s", optTag), value)
			}
		case *bool:
			// Pointer distinguishes "unset" (nil) from explicit false.
			if value != nil {
				if *value {
					optsSlice = append(optsSlice, fmt.Sprintf("--%s", optTag))
				} else {
					optsSlice = append(optsSlice, fmt.Sprintf("--%s=false", optTag))
				}
			}
		case map[string]string:
			// Each map entry becomes its own "--opt key=value" pair.
			for k, v := range value {
				optsSlice = append(optsSlice, fmt.Sprintf("--%s", optTag), fmt.Sprintf("%s=%s", k, v))
			}
		}
	}
	return optsSlice
}

View File

@@ -1,48 +0,0 @@
package config
import (
"fmt"
"strings"
"testing"
)
// testContains fails the test when s does not contain every one of substrs.
func testContains(t *testing.T, s string, substrs ...string) {
	for _, want := range substrs {
		if strings.Contains(s, want) {
			continue
		}
		t.Fail()
	}
}
// TestGenerateEngineOptsString exercises flag rendering for every supported
// EngineOpts field kind: string, *bool (both true and false), map options,
// and all of them combined. Comparison is substring-based via testContains
// because map iteration order is not fixed.
func TestGenerateEngineOptsString(t *testing.T) {
	// Zero-value opts must render to no arguments at all.
	if len(generateEngineOptsSlice(EngineOpts{})) != 0 {
		t.Fail()
	}

	testContains(t, fmt.Sprint(generateEngineOptsSlice(EngineOpts{
		Bridge: "bridge",
	})), "--bridge bridge")
	// &[]bool{true}[0] is a shorthand for a *bool pointing at true.
	testContains(t, fmt.Sprint(generateEngineOptsSlice(EngineOpts{
		SelinuxEnabled: &[]bool{true}[0],
	})), "--selinux-enabled")
	testContains(t, fmt.Sprint(generateEngineOptsSlice(EngineOpts{
		SelinuxEnabled: &[]bool{false}[0],
	})), "--selinux-enabled=false")
	testContains(t, fmt.Sprint(generateEngineOptsSlice(EngineOpts{
		LogOpts: map[string]string{
			"max-size": "25m",
			"max-file": "2",
		},
	})), "--log-opt max-size=25m", "--log-opt max-file=2")
	testContains(t, fmt.Sprint(generateEngineOptsSlice(EngineOpts{
		Bridge:         "bridge",
		SelinuxEnabled: &[]bool{true}[0],
		LogOpts: map[string]string{
			"max-size": "25m",
			"max-file": "2",
		},
	})), "--bridge bridge", "--selinux-enabled", "--log-opt max-size=25m", "--log-opt max-file=2")
}

View File

@@ -1,12 +1,8 @@
package config
import (
"fmt"
"runtime"
"github.com/coreos/coreos-cloudinit/config"
"github.com/docker/engine-api/types"
composeConfig "github.com/docker/libcompose/config"
"github.com/docker/libcompose/project"
"github.com/rancher/netconf"
)
@@ -24,7 +20,6 @@ const (
MODULES_ARCHIVE = "/modules.tar"
DEBUG = false
SYSTEM_DOCKER_LOG = "/var/log/system-docker.log"
SYSTEM_DOCKER_BIN = "/usr/bin/system-docker"
LABEL = "label"
HASH = "io.rancher.os.hash"
@@ -32,7 +27,6 @@ const (
DETACH = "io.rancher.os.detach"
CREATE_ONLY = "io.rancher.os.createonly"
RELOAD_CONFIG = "io.rancher.os.reloadconfig"
CONSOLE = "io.rancher.os.console"
SCOPE = "io.rancher.os.scope"
REBUILD = "io.docker.compose.rebuild"
SYSTEM = "system"
@@ -40,6 +34,7 @@ const (
OsConfigFile = "/usr/share/ros/os-config.yml"
CloudConfigDir = "/var/lib/rancher/conf/cloud-config.d"
CloudConfigBootFile = "/var/lib/rancher/conf/cloud-config.d/boot.yml"
CloudConfigPrivateFile = "/var/lib/rancher/conf/cloud-config.d/private.yml"
CloudConfigNetworkFile = "/var/lib/rancher/conf/cloud-config.d/network.yml"
CloudConfigScriptFile = "/var/lib/rancher/conf/cloud-config-script"
MetaDataFile = "/var/lib/rancher/conf/metadata"
@@ -49,32 +44,12 @@ const (
var (
OemConfigFile = OEM + "/oem-config.yml"
VERSION string
ARCH string
SUFFIX string
OS_REPO string
OS_BASE string
PrivateKeys = []string{
"rancher.ssh",
"rancher.docker.ca_key",
"rancher.docker.ca_cert",
"rancher.docker.server_key",
"rancher.docker.server_cert",
}
)
func init() {
if VERSION == "" {
VERSION = "v0.0.0-dev"
}
if ARCH == "" {
ARCH = runtime.GOARCH
}
if SUFFIX == "" && ARCH != "amd64" {
SUFFIX = "_" + ARCH
}
if OS_BASE == "" {
OS_BASE = fmt.Sprintf("%s/os-base:%s%s", OS_REPO, VERSION, SUFFIX)
}
}
type Repository struct {
@@ -85,47 +60,32 @@ type Repositories map[string]Repository
type CloudConfig struct {
SSHAuthorizedKeys []string `yaml:"ssh_authorized_keys"`
WriteFiles []File `yaml:"write_files"`
WriteFiles []config.File `yaml:"write_files"`
Hostname string `yaml:"hostname"`
Mounts [][]string `yaml:"mounts,omitempty"`
Rancher RancherConfig `yaml:"rancher,omitempty"`
Runcmd [][]string `yaml:"runcmd,omitempty"`
}
type File struct {
config.File
Container string `yaml:"container,omitempty"`
Rancher RancherConfig `yaml:"rancher,omitempty"`
}
type RancherConfig struct {
Console string `yaml:"console,omitempty"`
Environment map[string]string `yaml:"environment,omitempty"`
Services map[string]*composeConfig.ServiceConfigV1 `yaml:"services,omitempty"`
BootstrapContainers map[string]*composeConfig.ServiceConfigV1 `yaml:"bootstrap,omitempty"`
Autoformat map[string]*composeConfig.ServiceConfigV1 `yaml:"autoformat,omitempty"`
BootstrapDocker DockerConfig `yaml:"bootstrap_docker,omitempty"`
CloudInit CloudInit `yaml:"cloud_init,omitempty"`
Debug bool `yaml:"debug,omitempty"`
RmUsr bool `yaml:"rm_usr,omitempty"`
NoSharedRoot bool `yaml:"no_sharedroot,omitempty"`
Log bool `yaml:"log,omitempty"`
ForceConsoleRebuild bool `yaml:"force_console_rebuild,omitempty"`
Disable []string `yaml:"disable,omitempty"`
ServicesInclude map[string]bool `yaml:"services_include,omitempty"`
Modules []string `yaml:"modules,omitempty"`
Network netconf.NetworkConfig `yaml:"network,omitempty"`
DefaultNetwork netconf.NetworkConfig `yaml:"default_network,omitempty"`
Repositories Repositories `yaml:"repositories,omitempty"`
Ssh SshConfig `yaml:"ssh,omitempty"`
State StateConfig `yaml:"state,omitempty"`
SystemDocker DockerConfig `yaml:"system_docker,omitempty"`
Upgrade UpgradeConfig `yaml:"upgrade,omitempty"`
Docker DockerConfig `yaml:"docker,omitempty"`
RegistryAuths map[string]types.AuthConfig `yaml:"registry_auths,omitempty"`
Defaults Defaults `yaml:"defaults,omitempty"`
ResizeDevice string `yaml:"resize_device,omitempty"`
Sysctl map[string]string `yaml:"sysctl,omitempty"`
RestartServices []string `yaml:"restart_services,omitempty"`
Environment map[string]string `yaml:"environment,omitempty"`
Services map[string]*project.ServiceConfig `yaml:"services,omitempty"`
BootstrapContainers map[string]*project.ServiceConfig `yaml:"bootstrap,omitempty"`
Autoformat map[string]*project.ServiceConfig `yaml:"autoformat,omitempty"`
BootstrapDocker DockerConfig `yaml:"bootstrap_docker,omitempty"`
CloudInit CloudInit `yaml:"cloud_init,omitempty"`
Debug bool `yaml:"debug,omitempty"`
RmUsr bool `yaml:"rm_usr,omitempty"`
Log bool `yaml:"log,omitempty"`
Disable []string `yaml:"disable,omitempty"`
ServicesInclude map[string]bool `yaml:"services_include,omitempty"`
Modules []string `yaml:"modules,omitempty"`
Network netconf.NetworkConfig `yaml:"network,omitempty"`
Repositories Repositories `yaml:"repositories,omitempty"`
Ssh SshConfig `yaml:"ssh,omitempty"`
State StateConfig `yaml:"state,omitempty"`
SystemDocker DockerConfig `yaml:"system_docker,omitempty"`
Upgrade UpgradeConfig `yaml:"upgrade,omitempty"`
Docker DockerConfig `yaml:"docker,omitempty"`
}
type UpgradeConfig struct {
@@ -134,31 +94,10 @@ type UpgradeConfig struct {
Rollback string `yaml:"rollback,omitempty"`
}
type EngineOpts struct {
Bridge string `yaml:"bridge,omitempty" opt:"bridge"`
ConfigFile string `yaml:"config_file,omitempty" opt:"config-file"`
Containerd string `yaml:"containerd,omitempty" opt:"containerd"`
Debug *bool `yaml:"debug,omitempty" opt:"debug"`
ExecRoot string `yaml:"exec_root,omitempty" opt:"exec-root"`
Group string `yaml:"group,omitempty" opt:"group"`
Graph string `yaml:"graph,omitempty" opt:"graph"`
Host string `yaml:"host,omitempty" opt:"host"`
LiveRestore *bool `yaml:"live_restore,omitempty" opt:"live-restore"`
LogDriver string `yaml:"log_driver,omitempty" opt:"log-driver"`
LogOpts map[string]string `yaml:"log_opts,omitempty" opt:"log-opt"`
PidFile string `yaml:"pid_file,omitempty" opt:"pidfile"`
RegistryMirror string `yaml:"registry_mirror,omitempty" opt:"registry-mirror"`
Restart *bool `yaml:"restart,omitempty" opt:"restart"`
SelinuxEnabled *bool `yaml:"selinux_enabled,omitempty" opt:"selinux-enabled"`
StorageDriver string `yaml:"storage_driver,omitempty" opt:"storage-driver"`
UserlandProxy *bool `yaml:"userland_proxy,omitempty" opt:"userland-proxy"`
}
type DockerConfig struct {
EngineOpts
Engine string `yaml:"engine,omitempty"`
TLS bool `yaml:"tls,omitempty"`
TLSArgs []string `yaml:"tls_args,flow,omitempty"`
Args []string `yaml:"args,flow,omitempty"`
ExtraArgs []string `yaml:"extra_args,flow,omitempty"`
ServerCert string `yaml:"server_cert,omitempty"`
ServerKey string `yaml:"server_key,omitempty"`
@@ -177,9 +116,9 @@ type StateConfig struct {
Directory string `yaml:"directory,omitempty"`
FsType string `yaml:"fstype,omitempty"`
Dev string `yaml:"dev,omitempty"`
Wait bool `yaml:"wait,omitempty"`
Required bool `yaml:"required,omitempty"`
Autoformat []string `yaml:"autoformat,omitempty"`
FormatZero bool `yaml:"formatzero,omitempty"`
MdadmScan bool `yaml:"mdadm_scan,omitempty"`
Script string `yaml:"script,omitempty"`
OemFsType string `yaml:"oem_fstype,omitempty"`
@@ -190,12 +129,6 @@ type CloudInit struct {
Datasources []string `yaml:"datasources,omitempty"`
}
type Defaults struct {
Hostname string `yaml:"hostname,omitempty"`
Docker DockerConfig `yaml:"docker,omitempty"`
Network netconf.NetworkConfig `yaml:"network,omitempty"`
}
func (r Repositories) ToArray() []string {
result := make([]string, 0, len(r))
for _, repo := range r {

View File

@@ -1,82 +0,0 @@
package docker
import (
"encoding/base64"
"fmt"
"strings"
log "github.com/Sirupsen/logrus"
"github.com/docker/docker/registry"
"github.com/docker/engine-api/types"
"github.com/docker/libcompose/docker"
"github.com/rancher/os/config"
)
// ConfigAuthLookup will lookup registry auth info from cloud config
// if a context is set, it will also lookup auth info from the Docker config file
type ConfigAuthLookup struct {
cfg *config.CloudConfig
context *docker.Context
dockerConfigAuthLookup *docker.ConfigAuthLookup
}
// NewConfigAuthLookup builds a registry auth lookup backed by the given
// cloud config. No Docker context is attached yet; call SetContext to add
// Docker config-file lookups on top.
func NewConfigAuthLookup(cfg *config.CloudConfig) *ConfigAuthLookup {
	lookup := &ConfigAuthLookup{}
	lookup.cfg = cfg
	return lookup
}
// populateRemaining fills in Username and Password on authConfig by decoding
// its base64-encoded Auth field (the "username:password" form used by Docker
// registry credentials). An empty Auth field is not an error; the config is
// simply left untouched.
func populateRemaining(authConfig *types.AuthConfig) error {
	if authConfig.Auth == "" {
		return nil
	}

	decoded, err := base64.URLEncoding.DecodeString(authConfig.Auth)
	if err != nil {
		return err
	}

	// SplitN (not Split) so that passwords containing ':' survive decoding:
	// only the first ':' separates the username from the password.
	decodedSplit := strings.SplitN(string(decoded), ":", 2)
	if len(decodedSplit) != 2 {
		return fmt.Errorf("Invalid auth: %s", authConfig.Auth)
	}

	authConfig.Username = decodedSplit[0]
	authConfig.Password = decodedSplit[1]

	return nil
}
// SetConfig replaces the cloud config used for registry auth lookups.
func (c *ConfigAuthLookup) SetConfig(cfg *config.CloudConfig) {
c.cfg = cfg
}
// SetContext attaches a libcompose Docker context and derives a Docker
// config-file auth lookup from it, so All/Lookup can also consult the
// Docker config file in addition to cloud config.
func (c *ConfigAuthLookup) SetContext(context *docker.Context) {
c.context = context
c.dockerConfigAuthLookup = docker.NewConfigAuthLookup(context)
}
// Lookup resolves the auth configuration for the given repository's index,
// decoding the base64 Auth field into Username/Password. A nil repo/index or
// a decode failure yields a zero-value AuthConfig (the failure is logged).
func (c *ConfigAuthLookup) Lookup(repoInfo *registry.RepositoryInfo) types.AuthConfig {
	if repoInfo == nil || repoInfo.Index == nil {
		return types.AuthConfig{}
	}

	resolved := registry.ResolveAuthConfig(c.All(), repoInfo.Index)
	if err := populateRemaining(&resolved); err != nil {
		log.Error(err)
		return types.AuthConfig{}
	}

	return resolved
}
// All returns every known registry auth entry: the ones from cloud config,
// merged with any found via the Docker config-file lookup (which take
// precedence on key collision).
//
// The result is a freshly allocated map: the previous implementation wrote
// the Docker config-file entries straight into c.cfg.Rancher.RegistryAuths,
// which both mutated the shared cloud config and panicked when that map was
// nil.
func (c *ConfigAuthLookup) All() map[string]types.AuthConfig {
	registryAuths := map[string]types.AuthConfig{}
	for registry, authConfig := range c.cfg.Rancher.RegistryAuths {
		registryAuths[registry] = authConfig
	}
	if c.dockerConfigAuthLookup != nil {
		for registry, authConfig := range c.dockerConfigAuthLookup.All() {
			registryAuths[registry] = authConfig
		}
	}
	return registryAuths
}

View File

@@ -1,27 +1,26 @@
package docker
import (
dockerClient "github.com/docker/engine-api/client"
dockerClient "github.com/fsouza/go-dockerclient"
"github.com/rancher/os/config"
"golang.org/x/net/context"
)
func NewSystemClient() (dockerClient.APIClient, error) {
func NewSystemClient() (*dockerClient.Client, error) {
return NewClient(config.DOCKER_SYSTEM_HOST)
}
func NewDefaultClient() (dockerClient.APIClient, error) {
func NewDefaultClient() (*dockerClient.Client, error) {
return NewClient(config.DOCKER_HOST)
}
func NewClient(endpoint string) (dockerClient.APIClient, error) {
client, err := dockerClient.NewClient(endpoint, "", nil, nil)
func NewClient(endpoint string) (*dockerClient.Client, error) {
client, err := dockerClient.NewClient(endpoint)
if err != nil {
return nil, err
}
err = ClientOK(endpoint, func() bool {
_, err := client.Info(context.Background())
_, err := client.Info()
return err == nil
})

View File

@@ -4,36 +4,34 @@ import (
"fmt"
"sync"
"golang.org/x/net/context"
log "github.com/Sirupsen/logrus"
dockerclient "github.com/docker/engine-api/client"
composeClient "github.com/docker/libcompose/docker/client"
"github.com/docker/libcompose/docker"
"github.com/docker/libcompose/project"
dockerclient "github.com/fsouza/go-dockerclient"
"github.com/rancher/os/config"
"github.com/rancher/os/util"
)
type ClientFactory struct {
userClient dockerclient.APIClient
systemClient dockerclient.APIClient
userClient *dockerclient.Client
systemClient *dockerclient.Client
userOnce sync.Once
systemOnce sync.Once
}
func NewClientFactory(opts composeClient.Options) (project.ClientFactory, error) {
func NewClientFactory(opts docker.ClientOpts) (docker.ClientFactory, error) {
userOpts := opts
systemOpts := opts
userOpts.Host = config.DOCKER_HOST
systemOpts.Host = config.DOCKER_SYSTEM_HOST
userClient, err := composeClient.Create(userOpts)
userClient, err := docker.CreateClient(userOpts)
if err != nil {
return nil, err
}
systemClient, err := composeClient.Create(systemOpts)
systemClient, err := docker.CreateClient(systemOpts)
if err != nil {
return nil, err
}
@@ -44,7 +42,7 @@ func NewClientFactory(opts composeClient.Options) (project.ClientFactory, error)
}, nil
}
func (c *ClientFactory) Create(service project.Service) dockerclient.APIClient {
func (c *ClientFactory) Create(service project.Service) *dockerclient.Client {
if IsSystemContainer(service.Config()) {
waitFor(&c.systemOnce, c.systemClient, config.DOCKER_SYSTEM_HOST)
return c.systemClient
@@ -54,10 +52,10 @@ func (c *ClientFactory) Create(service project.Service) dockerclient.APIClient {
return c.userClient
}
func waitFor(once *sync.Once, client dockerclient.APIClient, endpoint string) {
func waitFor(once *sync.Once, client *dockerclient.Client, endpoint string) {
once.Do(func() {
err := ClientOK(endpoint, func() bool {
_, err := client.Info(context.Background())
_, err := client.Info()
return err == nil
})
if err != nil {

View File

@@ -4,7 +4,7 @@ import (
"fmt"
"strings"
composeConfig "github.com/docker/libcompose/config"
"github.com/docker/libcompose/project"
"github.com/rancher/os/config"
)
@@ -27,30 +27,11 @@ func appendEnv(array []string, key, value string) []string {
return append(array, fmt.Sprintf("%s=%s", key, value))
}
// environmentFromCloudConfig returns the user-defined environment from cloud
// config, overlaid with the configured HTTP/HTTPS/no-proxy settings in both
// lower- and upper-case variable forms.
//
// A fresh map is always returned: the previous implementation wrote the proxy
// entries straight into cfg.Rancher.Environment, which mutated the shared
// config and panicked when that map was nil.
func environmentFromCloudConfig(cfg *config.CloudConfig) map[string]string {
	environment := map[string]string{}
	for k, v := range cfg.Rancher.Environment {
		environment[k] = v
	}

	if proxy := cfg.Rancher.Network.HttpProxy; proxy != "" {
		environment["http_proxy"] = proxy
		environment["HTTP_PROXY"] = proxy
	}
	if proxy := cfg.Rancher.Network.HttpsProxy; proxy != "" {
		environment["https_proxy"] = proxy
		environment["HTTPS_PROXY"] = proxy
	}
	if proxy := cfg.Rancher.Network.NoProxy; proxy != "" {
		environment["no_proxy"] = proxy
		environment["NO_PROXY"] = proxy
	}

	return environment
}
func lookupKeys(cfg *config.CloudConfig, keys ...string) []string {
environment := environmentFromCloudConfig(cfg)
for _, key := range keys {
if strings.HasSuffix(key, "*") {
result := []string{}
for envKey, envValue := range environment {
for envKey, envValue := range cfg.Rancher.Environment {
keyPrefix := key[:len(key)-1]
if strings.HasPrefix(envKey, keyPrefix) {
result = appendEnv(result, envKey, envValue)
@@ -60,7 +41,7 @@ func lookupKeys(cfg *config.CloudConfig, keys ...string) []string {
if len(result) > 0 {
return result
}
} else if value, ok := environment[key]; ok {
} else if value, ok := cfg.Rancher.Environment[key]; ok {
return appendEnv([]string{}, key, value)
}
}
@@ -68,11 +49,7 @@ func lookupKeys(cfg *config.CloudConfig, keys ...string) []string {
return []string{}
}
func (c *ConfigEnvironment) SetConfig(cfg *config.CloudConfig) {
c.cfg = cfg
}
func (c *ConfigEnvironment) Lookup(key, serviceName string, serviceConfig *composeConfig.ServiceConfig) []string {
func (c *ConfigEnvironment) Lookup(key, serviceName string, serviceConfig *project.ServiceConfig) []string {
fullKey := fmt.Sprintf("%s/%s", serviceName, key)
return lookupKeys(c.cfg, fullKey, key)
}

View File

@@ -1,17 +1,11 @@
package docker
import (
"fmt"
"github.com/Sirupsen/logrus"
dockerclient "github.com/docker/engine-api/client"
"github.com/docker/engine-api/types"
composeConfig "github.com/docker/libcompose/config"
"github.com/docker/libcompose/docker"
"github.com/docker/libcompose/project"
"github.com/docker/libcompose/project/options"
dockerclient "github.com/fsouza/go-dockerclient"
"github.com/rancher/os/config"
"golang.org/x/net/context"
)
type Service struct {
@@ -21,7 +15,7 @@ type Service struct {
project *project.Project
}
func NewService(factory *ServiceFactory, name string, serviceConfig *composeConfig.ServiceConfig, context *docker.Context, project *project.Project) *Service {
func NewService(factory *ServiceFactory, name string, serviceConfig *project.ServiceConfig, context *docker.Context, project *project.Project) *Service {
return &Service{
Service: docker.NewService(name, serviceConfig, context),
deps: factory.Deps,
@@ -56,20 +50,20 @@ func (s *Service) missingImage() bool {
return false
}
client := s.context.ClientFactory.Create(s)
_, _, err := client.ImageInspectWithRaw(context.Background(), s.Config().Image, false)
return err != nil
i, err := client.InspectImage(s.Config().Image)
return err != nil || i == nil
}
func (s *Service) requiresSyslog() bool {
return s.Config().Logging.Driver == "syslog"
return s.Config().LogDriver == "syslog"
}
func (s *Service) requiresUserDocker() bool {
return s.Config().Labels[config.SCOPE] != config.SYSTEM
return s.Config().Labels.MapParts()[config.SCOPE] != config.SYSTEM
}
func appendLink(deps []project.ServiceRelationship, name string, optional bool, p *project.Project) []project.ServiceRelationship {
if _, ok := p.ServiceConfigs.Get(name); !ok {
if _, ok := p.Configs[name]; !ok {
return deps
}
rel := project.NewServiceRelationship(name, project.RelTypeLink)
@@ -77,26 +71,25 @@ func appendLink(deps []project.ServiceRelationship, name string, optional bool,
return append(deps, rel)
}
func (s *Service) shouldRebuild(ctx context.Context) (bool, error) {
containers, err := s.Containers(ctx)
func (s *Service) shouldRebuild() (bool, error) {
containers, err := s.Containers()
if err != nil {
return false, err
}
cfg := config.LoadConfig()
for _, c := range containers {
outOfSync, err := c.(*docker.Container).OutOfSync(ctx, s.Service.Config().Image)
outOfSync, err := c.(*docker.Container).OutOfSync(s.Service.Config().Image)
if err != nil {
return false, err
}
_, containerInfo, err := s.getContainer(ctx)
if err != nil {
_, containerInfo, err := s.getContainer()
if containerInfo == nil || err != nil {
return false, err
}
name := containerInfo.Name[1:]
origRebuildLabel := containerInfo.Config.Labels[config.REBUILD]
newRebuildLabel := s.Config().Labels[config.REBUILD]
newRebuildLabel := s.Config().Labels.MapParts()[config.REBUILD]
rebuildLabelChanged := newRebuildLabel != origRebuildLabel
logrus.WithFields(logrus.Fields{
"origRebuildLabel": origRebuildLabel,
@@ -104,66 +97,46 @@ func (s *Service) shouldRebuild(ctx context.Context) (bool, error) {
"rebuildLabelChanged": rebuildLabelChanged,
"outOfSync": outOfSync}).Debug("Rebuild values")
if newRebuildLabel == "always" {
return true, nil
}
if s.Name() == "console" && cfg.Rancher.ForceConsoleRebuild {
if err := config.Set("rancher.force_console_rebuild", false); err != nil {
return false, err
}
return true, nil
}
if outOfSync {
if s.Name() == "console" {
origConsoleLabel := containerInfo.Config.Labels[config.CONSOLE]
newConsoleLabel := s.Config().Labels[config.CONSOLE]
if newConsoleLabel != origConsoleLabel {
return true, nil
}
} else if rebuildLabelChanged || origRebuildLabel != "false" {
return true, nil
} else {
logrus.Warnf("%s needs rebuilding", name)
}
if origRebuildLabel == "always" || rebuildLabelChanged || origRebuildLabel != "false" && outOfSync {
logrus.Infof("Rebuilding %s", name)
return true, err
} else if outOfSync {
logrus.Warnf("%s needs rebuilding", name)
}
}
return false, nil
}
func (s *Service) Up(ctx context.Context, options options.Up) error {
labels := s.Config().Labels
func (s *Service) Up() error {
labels := s.Config().Labels.MapParts()
if err := s.Service.Create(ctx, options.Create); err != nil {
if err := s.Service.Create(); err != nil {
return err
}
shouldRebuild, err := s.shouldRebuild(ctx)
shouldRebuild, err := s.shouldRebuild()
if err != nil {
return err
}
if shouldRebuild {
logrus.Infof("Rebuilding %s", s.Name())
cs, err := s.Service.Containers(ctx)
cs, err := s.Service.Containers()
if err != nil {
return err
}
for _, c := range cs {
if _, err := c.(*docker.Container).Recreate(ctx, s.Config().Image); err != nil {
if _, err := c.(*docker.Container).Recreate(s.Config().Image); err != nil {
return err
}
}
if err = s.rename(ctx); err != nil {
return err
}
s.rename()
}
if labels[config.CREATE_ONLY] == "true" {
return s.checkReload(labels)
}
if err := s.Service.Up(ctx, options); err != nil {
if err := s.Service.Up(); err != nil {
return err
}
if labels[config.DETACH] == "false" {
if err := s.wait(ctx); err != nil {
if err := s.wait(); err != nil {
return err
}
}
@@ -178,53 +151,52 @@ func (s *Service) checkReload(labels map[string]string) error {
return nil
}
func (s *Service) Create(ctx context.Context, options options.Create) error {
return s.Service.Create(ctx, options)
func (s *Service) Create() error {
return s.Service.Create()
}
func (s *Service) getContainer(ctx context.Context) (dockerclient.APIClient, types.ContainerJSON, error) {
containers, err := s.Service.Containers(ctx)
func (s *Service) getContainer() (*dockerclient.Client, *dockerclient.Container, error) {
containers, err := s.Service.Containers()
if err != nil {
return nil, types.ContainerJSON{}, err
return nil, nil, err
}
if len(containers) == 0 {
return nil, types.ContainerJSON{}, fmt.Errorf("No containers found for %s", s.Name())
return nil, nil, nil
}
id, err := containers[0].ID()
if err != nil {
return nil, types.ContainerJSON{}, err
return nil, nil, err
}
client := s.context.ClientFactory.Create(s)
info, err := client.ContainerInspect(context.Background(), id)
info, err := client.InspectContainer(id)
return client, info, err
}
func (s *Service) wait(ctx context.Context) error {
client, info, err := s.getContainer(ctx)
if err != nil {
func (s *Service) wait() error {
client, info, err := s.getContainer()
if err != nil || info == nil {
return err
}
if _, err := client.ContainerWait(context.Background(), info.ID); err != nil {
if _, err := client.WaitContainer(info.ID); err != nil {
return err
}
return nil
}
func (s *Service) rename(ctx context.Context) error {
client, info, err := s.getContainer(ctx)
if err != nil {
func (s *Service) rename() error {
client, info, err := s.getContainer()
if err != nil || info == nil {
return err
}
if len(info.Name) > 0 && info.Name[1:] != s.Name() {
logrus.Debugf("Renaming container %s => %s", info.Name[1:], s.Name())
return client.ContainerRename(context.Background(), info.ID, s.Name())
return client.RenameContainer(dockerclient.RenameContainerOptions{ID: info.ID, Name: s.Name()})
} else {
return nil
}

View File

@@ -1,7 +1,6 @@
package docker
import (
composeConfig "github.com/docker/libcompose/config"
"github.com/docker/libcompose/docker"
"github.com/docker/libcompose/project"
"github.com/rancher/os/util"
@@ -12,13 +11,13 @@ type ServiceFactory struct {
Deps map[string][]string
}
func (s *ServiceFactory) Create(project *project.Project, name string, serviceConfig *composeConfig.ServiceConfig) (project.Service, error) {
if after := serviceConfig.Labels["io.rancher.os.after"]; after != "" {
func (s *ServiceFactory) Create(project *project.Project, name string, serviceConfig *project.ServiceConfig) (project.Service, error) {
if after := serviceConfig.Labels.MapParts()["io.rancher.os.after"]; after != "" {
for _, dep := range util.TrimSplit(after, ",") {
s.Deps[name] = append(s.Deps[name], dep)
}
}
if before := serviceConfig.Labels["io.rancher.os.before"]; before != "" {
if before := serviceConfig.Labels.MapParts()["io.rancher.os.before"]; before != "" {
for _, dep := range util.TrimSplit(before, ",") {
s.Deps[dep] = append(s.Deps[dep], name)
}

View File

@@ -1,11 +1,11 @@
package docker
import (
composeConfig "github.com/docker/libcompose/config"
"github.com/docker/libcompose/project"
"github.com/rancher/os/config"
)
func IsSystemContainer(serviceConfig *composeConfig.ServiceConfig) bool {
return serviceConfig.Labels[config.SCOPE] == config.SYSTEM
func IsSystemContainer(serviceConfig *project.ServiceConfig) bool {
return serviceConfig.Labels.MapParts()[config.SCOPE] == config.SYSTEM
}

Binary file not shown.

Before

Width:  |  Height:  |  Size: 18 KiB

After

Width:  |  Height:  |  Size: 30 KiB

View File

@@ -1,63 +0,0 @@
package hostname
import (
"bufio"
"io/ioutil"
"os"
"strings"
"syscall"
"github.com/rancher/os/config"
)
// SetHostnameFromCloudConfig applies the hostname from cloud config, falling
// back to the configured default hostname when no explicit one is set. When
// neither is present this is a no-op.
func SetHostnameFromCloudConfig(cc *config.CloudConfig) error {
	hostname := cc.Hostname
	if hostname == "" {
		hostname = cc.Rancher.Defaults.Hostname
	}
	if hostname == "" {
		// nothing configured; leave the current hostname alone
		return nil
	}
	return syscall.Sethostname([]byte(hostname))
}
// SyncHostname rewrites the 127.0.1.1 entry in /etc/hosts so that it maps to
// the system's current hostname; all other lines are preserved (trimmed of
// surrounding whitespace). An empty hostname is a no-op.
func SyncHostname() error {
	hostname, err := os.Hostname()
	if err != nil {
		return err
	}
	if hostname == "" {
		return nil
	}

	hosts, err := os.Open("/etc/hosts")
	if err != nil {
		return err
	}
	// defer only after the error check: deferring Close on a failed Open is
	// a latent nil-receiver call (the original code deferred before checking).
	defer hosts.Close()

	lines := bufio.NewScanner(hosts)
	hostsContent := ""
	for lines.Scan() {
		line := strings.TrimSpace(lines.Text())
		fields := strings.Fields(line)
		if len(fields) > 0 && fields[0] == "127.0.1.1" {
			// replace whatever name was previously mapped to 127.0.1.1
			hostsContent += "127.0.1.1 " + hostname + "\n"
			continue
		}
		hostsContent += line + "\n"
	}
	// surface read errors the scanner would otherwise swallow silently
	if err := lines.Err(); err != nil {
		return err
	}

	return ioutil.WriteFile("/etc/hosts", []byte(hostsContent), 0600)
}

View File

@@ -1,2 +0,0 @@
assets
build/dist/kernel

View File

@@ -1,2 +0,0 @@
# Minimal base image: start from an empty filesystem and unpack the prepared
# root filesystem tarball at /. ADD is the right tool here — this is its
# legitimate local-tar auto-extraction use case.
FROM scratch
ADD build/rootfs.tar /

View File

@@ -1,9 +0,0 @@
#!/bin/bash
# Copy a previously downloaded rootfs tarball ($DOWNLOADS/rootfs.tar) into a
# build/ directory next to this script so the image build can pick it up.
# Does nothing when the tarball is absent.

TAR="${DOWNLOADS}/rootfs.tar"

if [ -e "$TAR" ]; then
    # quote all paths and bail out if cd fails, so cp can never run in the
    # wrong directory (the original used unquoted vars and an unchecked cd)
    cd "$(dirname "$0")" || exit 1
    mkdir -p build
    cp "$TAR" build
fi

View File

@@ -1,39 +0,0 @@
# RancherOS system image, layered on the raw Buildroot rootfs image.
FROM rancher/os-rootfs
# Mask udev's 80-net-name-slot rule (symlink to /dev/null) — presumably to
# keep classic ethN-style interface names; confirm against udev docs.
RUN ln -s /dev/null /etc/udev/rules.d/80-net-name-slot.rules
# Cleanup Buildroot
# Single layer: drop Buildroot's power tools, give root a bash shell, set the
# login banner, recreate the standard /var and /run directories, lock root,
# and create the rancher/docker users with docker+sudo group membership.
RUN rm /sbin/poweroff /sbin/reboot /sbin/halt && \
sed -i '/^root/s!/bin/sh!/bin/bash!' /etc/passwd && \
echo 'RancherOS \n \l' > /etc/issue && \
rm -rf /run \
/linuxrc \
/etc/os-release \
/var/cache \
/var/lock \
/var/log \
/var/run \
/var/spool \
/var/lib/misc && \
mkdir -p \
/home \
/run \
/var/cache \
/var/lock \
/var/log \
/var/run \
/var/spool && \
passwd -l root && \
addgroup -g 1100 rancher && \
addgroup -g 1101 docker && \
addgroup -g 1103 sudo && \
adduser -u 1100 -G rancher -D -h /home/rancher -s /bin/bash rancher && \
adduser -u 1101 -G docker -D -h /home/docker -s /bin/bash docker && \
adduser rancher docker && \
adduser rancher sudo && \
adduser docker sudo && \
echo '%sudo ALL=(ALL) ALL' >> /etc/sudoers
COPY inputrc /etc/inputrc
COPY growpart /usr/bin/growpart
# Patch the bundled growpart: pass --nr to partx, and switch dhcpcd from
# DUID- to clientid-based identification.
RUN sed -i s/"partx --update \"\$part\" \"\$dev\""/"partx --update --nr \"\$part\" \"\$dev\""/g /usr/bin/growpart && \
sed -i -e 's/duid/clientid/g' /etc/dhcpcd.conf
ENTRYPOINT ["/usr/bin/ros", "entrypoint"]

View File

@@ -1,14 +0,0 @@
#!/bin/bash
# Container entrypoint: bind the host's /dev into this container when the
# host devtmpfs is exposed at /host/dev, seed the CA bundle if missing, then
# exec the requested command so it becomes PID 1 and receives signals.

if [ -e /host/dev ]; then
    mount --rbind /host/dev /dev
fi

CA_BASE=/etc/ssl/certs/ca-certificates.crt.rancher
CA=/etc/ssl/certs/ca-certificates.crt

# Install the baked-in CA bundle only when no bundle is already present.
# Quote the paths so cp receives exactly two arguments (the original left
# them unquoted).
if [[ -e ${CA_BASE} && ! -e ${CA} ]]; then
    cp "$CA_BASE" "$CA"
fi

exec "$@"

View File

@@ -1,780 +0,0 @@
#!/bin/sh
# Copyright (C) 2011 Canonical Ltd.
# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
#
# Authors: Scott Moser <smoser@canonical.com>
# Juerg Haefliger <juerg.haefliger@hp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# the fudge factor. if within this many bytes dont bother
FUDGE=${GROWPART_FUDGE:-$((1024*1024))}
TEMP_D=""
RESTORE_FUNC=""
RESTORE_HUMAN=""
VERBOSITY=0
DISK=""
PART=""
PT_UPDATE=false
DRY_RUN=0
SFDISK_VERSION=""
SFDISK_2_26="22600"
SFDISK_V_WORKING_GPT="22603"
MBR_BACKUP=""
GPT_BACKUP=""
_capture=""
# error: print a message to stderr without exiting.
error() {
echo "$@" 1>&2
}
# fail: report failure (when given a message) and exit 2, the script's
# hard-error status.
fail() {
[ $# -eq 0 ] || echo "FAILED:" "$@"
exit 2
}
# nochange: report that the partition cannot/need not be grown; exit 1.
nochange() {
echo "NOCHANGE:" "$@"
exit 1
}
# changed: report a successful resize; exit 0.
changed() {
echo "CHANGED:" "$@"
exit 0
}
# change: dry-run variant of changed — report what would be done; exit 0.
change() {
echo "CHANGE:" "$@"
exit 0
}
cleanup() {
if [ -n "${RESTORE_FUNC}" ]; then
error "***** WARNING: Resize failed, attempting to revert ******"
if ${RESTORE_FUNC} ; then
error "***** Appears to have gone OK ****"
else
error "***** FAILED! ******"
if [ -n "${RESTORE_HUMAN}" -a -f "${RESTORE_HUMAN}" ]; then
error "**** original table looked like: ****"
cat "${RESTORE_HUMAN}" 1>&2
else
error "We seem to have not saved the partition table!"
fi
fi
fi
[ -z "${TEMP_D}" -o ! -d "${TEMP_D}" ] || rm -Rf "${TEMP_D}"
}
debug() {
local level=${1}
shift
[ "${level}" -gt "${VERBOSITY}" ] && return
if [ "${DEBUG_LOG}" ]; then
echo "$@" >>"${DEBUG_LOG}"
else
error "$@"
fi
}
debugcat() {
local level="$1"
shift;
[ "${level}" -gt "$VERBOSITY" ] && return
if [ "${DEBUG_LOG}" ]; then
cat "$@" >>"${DEBUG_LOG}"
else
cat "$@" 1>&2
fi
}
# mktemp_d: create a private temporary directory and return its path in
# _RET. Prefers mktemp -d; falls back to a umask-protected mkdir when
# mktemp isn't available.
mktemp_d() {
# just a mktemp -d that doesn't need mktemp if it's not there.
_RET=$(mktemp -d "${TMPDIR:-/tmp}/${0##*/}.XXXXXX" 2>/dev/null) &&
return
_RET=$(umask 077 && t="${TMPDIR:-/tmp}/${0##*/}.$$" &&
mkdir "${t}" && echo "${t}")
return
}
Usage() {
cat <<EOF
${0##*/} disk partition
rewrite partition table so that partition takes up all the space it can
options:
-h | --help print Usage and exit
--fudge F if part could be resized, but change would be
less than 'F' bytes, do not resize (default: ${FUDGE})
-N | --dry-run only report what would be done, show new 'sfdisk -d'
-v | --verbose increase verbosity / debug
-u | --update R update the the kernel partition table info after growing
this requires kernel support and 'partx --update'
R is one of:
- 'auto' : [default] update partition if possible
- 'force' : try despite sanity checks (fail on failure)
- 'off' : do not attempt
- 'on' : fail if sanity checks indicate no support
Example:
- ${0##*/} /dev/sda 1
Resize partition 1 on /dev/sda
EOF
}
bad_Usage() {
Usage 1>&2
error "$@"
exit 2
}
sfdisk_restore_legacy() {
sfdisk --no-reread "${DISK}" -I "${MBR_BACKUP}"
}
sfdisk_restore() {
# files are named: sfdisk-<device>-<offset>.bak
local f="" offset="" fails=0
for f in "${MBR_BACKUP}"*.bak; do
[ -f "$f" ] || continue
offset=${f##*-}
offset=${offset%.bak}
[ "$offset" = "$f" ] && {
error "WARN: confused by file $f";
continue;
}
dd "if=$f" "of=${DISK}" seek=$(($offset)) bs=1 conv=notrunc ||
{ error "WARN: failed restore from $f"; fails=$(($fails+1)); }
done
return $fails
}
sfdisk_worked_but_blkrrpart_failed() {
local ret="$1" output="$2"
# exit code found was just 1, but dont insist on that
#[ $ret -eq 1 ] || return 1
# Successfully wrote the new partition table
grep -qi "Success.* wrote.* new.* partition" "$output" &&
grep -qi "BLKRRPART: Device or resource busy" "$output"
return
}
get_sfdisk_version() {
# set SFDISK_VERSION to MAJOR*10000+MINOR*100+MICRO
local out oifs="$IFS" ver=""
[ -n "$SFDISK_VERSION" ] && return 0
# expected output: sfdisk from util-linux 2.25.2
out=$(sfdisk --version) ||
{ error "failed to get sfdisk version"; return 1; }
set -- $out
ver=$4
case "$ver" in
[0-9]*.[0-9]*.[0-9]|[0-9].[0-9]*)
IFS="."; set -- $ver; IFS="$oifs"
SFDISK_VERSION=$(($1*10000+$2*100+${3:-0}))
return 0;;
*) error "unexpected output in sfdisk --version [$out]"
return 1;;
esac
}
resize_sfdisk() {
local humanpt="${TEMP_D}/recovery"
local mbr_backup="${TEMP_D}/orig.save"
local restore_func=""
local format="$1"
local change_out=${TEMP_D}/change.out
local dump_out=${TEMP_D}/dump.out
local new_out=${TEMP_D}/new.out
local dump_mod=${TEMP_D}/dump.mod
local tmp="${TEMP_D}/tmp.out"
local err="${TEMP_D}/err.out"
local mbr_max_512="4294967296"
local pt_start pt_size pt_end max_end new_size change_info dpart
local sector_num sector_size disk_size tot out
rqe sfd_list sfdisk --list --unit=S "$DISK" >"$tmp" ||
fail "failed: sfdisk --list $DISK"
if [ "${SFDISK_VERSION}" -lt ${SFDISK_2_26} ]; then
# exected output contains: Units: sectors of 512 bytes, ...
out=$(awk '$1 == "Units:" && $5 ~ /bytes/ { print $4 }' "$tmp") ||
fail "failed to read sfdisk output"
if [ -z "$out" ]; then
error "WARN: sector size not found in sfdisk output, assuming 512"
sector_size=512
else
sector_size="$out"
fi
local _w _cyl _w1 _heads _w2 sectors _w3 t s
# show-size is in units of 1024 bytes (same as /proc/partitions)
t=$(sfdisk --show-size "${DISK}") ||
fail "failed: sfdisk --show-size $DISK"
disk_size=$((t*1024))
sector_num=$(($disk_size/$sector_size))
msg="disk size '$disk_size' not evenly div by sector size '$sector_size'"
[ "$((${disk_size}%${sector_size}))" -eq 0 ] ||
error "WARN: $msg"
restore_func=sfdisk_restore_legacy
else
# --list first line output:
# Disk /dev/vda: 20 GiB, 21474836480 bytes, 41943040 sectors
local _x
read _x _x _x _x disk_size _x sector_num _x < "$tmp"
sector_size=$((disk_size/$sector_num))
restore_func=sfdisk_restore
fi
debug 1 "$sector_num sectors of $sector_size. total size=${disk_size} bytes"
[ $(($disk_size/512)) -gt $mbr_max_512 ] &&
debug 1 "WARN: disk is larger than 2TB. additional space will go unused."
rqe sfd_dump sfdisk --unit=S --dump "${DISK}" >"${dump_out}" ||
fail "failed to dump sfdisk info for ${DISK}"
RESTORE_HUMAN="$dump_out"
{
echo "## sfdisk --unit=S --dump ${DISK}"
cat "${dump_out}"
} >"$humanpt"
[ $? -eq 0 ] || fail "failed to save sfdisk -d output"
RESTORE_HUMAN="$humanpt"
debugcat 1 "$humanpt"
sed -e 's/,//g; s/start=/start /; s/size=/size /' "${dump_out}" \
>"${dump_mod}" ||
fail "sed failed on dump output"
dpart="${DISK}${PART}" # disk and partition number
if [ -b "${DISK}p${PART}" -a "${DISK%[0-9]}" != "${DISK}" ]; then
# for block devices that end in a number (/dev/nbd0)
# the partition is "<name>p<partition_number>" (/dev/nbd0p1)
dpart="${DISK}p${PART}"
elif [ "${DISK#/dev/loop[0-9]}" != "${DISK}" ]; then
# for /dev/loop devices, sfdisk output will be <name>p<number>
# format also, even though there is not a device there.
dpart="${DISK}p${PART}"
fi
pt_start=$(awk '$1 == pt { print $4 }' "pt=${dpart}" <"${dump_mod}") &&
pt_size=$(awk '$1 == pt { print $6 }' "pt=${dpart}" <"${dump_mod}") &&
[ -n "${pt_start}" -a -n "${pt_size}" ] &&
pt_end=$((${pt_size}+${pt_start})) ||
fail "failed to get start and end for ${dpart} in ${DISK}"
# find the minimal starting location that is >= pt_end
max_end=$(awk '$3 == "start" { if($4 >= pt_end && $4 < min)
{ min = $4 } } END { printf("%s\n",min); }' \
min=${sector_num} pt_end=${pt_end} "${dump_mod}") &&
[ -n "${max_end}" ] ||
fail "failed to get max_end for partition ${PART}"
mbr_max_sectors=$((mbr_max_512*$((sector_size/512))))
if [ "$max_end" -gt "$mbr_max_sectors" ]; then
max_end=$mbr_max_sectors
fi
if [ "$format" = "gpt" ]; then
# sfdisk respects 'last-lba' in input, and complains about
# partitions that go past that. without it, it does the right thing.
sed -i '/^last-lba:/d' "$dump_out" ||
fail "failed to remove last-lba from output"
fi
local gpt_second_size="33"
if [ "${max_end}" -gt "$((${sector_num}-${gpt_second_size}))" ]; then
# if mbr allow subsequent conversion to gpt without shrinking the
# partition. safety net at cost of 33 sectors, seems reasonable.
# if gpt, we can't write there anyway.
debug 1 "padding ${gpt_second_size} sectors for gpt secondary header"
max_end=$((${sector_num}-${gpt_second_size}))
fi
debug 1 "max_end=${max_end} tot=${sector_num} pt_end=${pt_end}" \
"pt_start=${pt_start} pt_size=${pt_size}"
[ $((${pt_end})) -eq ${max_end} ] &&
nochange "partition ${PART} is size ${pt_size}. it cannot be grown"
[ $((${pt_end}+(${FUDGE}/$sector_size))) -gt ${max_end} ] &&
nochange "partition ${PART} could only be grown by" \
"$((${max_end}-${pt_end})) [fudge=$((${FUDGE}/$sector_size))]"
# now, change the size for this partition in ${dump_out} to be the
# new size
new_size=$((${max_end}-${pt_start}))
sed "\|^\s*${dpart} |s/${pt_size},/${new_size},/" "${dump_out}" \
>"${new_out}" ||
fail "failed to change size in output"
change_info="partition=${PART} start=${pt_start} old: size=${pt_size} end=${pt_end} new: size=${new_size},end=${max_end}"
if [ ${DRY_RUN} -ne 0 ]; then
echo "CHANGE: ${change_info}"
{
echo "# === old sfdisk -d ==="
cat "${dump_out}"
echo "# === new sfdisk -d ==="
cat "${new_out}"
} 1>&2
exit 0
fi
MBR_BACKUP="${mbr_backup}"
LANG=C sfdisk --no-reread "${DISK}" --force \
-O "${mbr_backup}" <"${new_out}" >"${change_out}" 2>&1
ret=$?
[ $ret -eq 0 ] || RESTORE_FUNC="${restore_func}"
if [ $ret -eq 0 ]; then
:
elif $PT_UPDATE &&
sfdisk_worked_but_blkrrpart_failed "$ret" "${change_out}"; then
# if the command failed, but it looks like only because
# the device was busy and we have pt_update, then go on
debug 1 "sfdisk failed, but likely only because of blkrrpart"
else
error "attempt to resize ${DISK} failed. sfdisk output below:"
sed 's,^,| ,' "${change_out}" 1>&2
fail "failed to resize"
fi
rq pt_update pt_update "$DISK" "$PART" ||
fail "pt_resize failed"
RESTORE_FUNC=""
changed "${change_info}"
# dump_out looks something like:
## partition table of /tmp/out.img
#unit: sectors
#
#/tmp/out.img1 : start= 1, size= 48194, Id=83
#/tmp/out.img2 : start= 48195, size= 963900, Id=83
#/tmp/out.img3 : start= 1012095, size= 305235, Id=82
#/tmp/out.img4 : start= 1317330, size= 771120, Id= 5
#/tmp/out.img5 : start= 1317331, size= 642599, Id=83
#/tmp/out.img6 : start= 1959931, size= 48194, Id=83
#/tmp/out.img7 : start= 2008126, size= 80324, Id=83
}
# gpt_restore: roll back the GPT on ${DISK} from the backup taken before
# modification.  Installed as RESTORE_FUNC when a resize attempt fails.
# NOTE(review): sgdisk's short '-l' is --load-backup, i.e. this restores
# ${GPT_BACKUP} onto ${DISK}.
gpt_restore() {
    sgdisk -l "${GPT_BACKUP}" "${DISK}"
}
# resize_sgdisk: grow partition ${PART} on GPT disk ${DISK} to the largest
# possible size using sgdisk.  Honors ${DRY_RUN} and the byte-valued ${FUDGE}
# threshold; keeps a partition-table backup (GPT_BACKUP) and a human-readable
# dump (RESTORE_HUMAN) so a failed change can be rolled back.
resize_sgdisk() {
    GPT_BACKUP="${TEMP_D}/pt.backup"
    local pt_info="${TEMP_D}/pt.info"
    local pt_pretend="${TEMP_D}/pt.pretend"
    local pt_data="${TEMP_D}/pt.data"
    # NOTE(review): 'out' appears unused in this function.
    local out="${TEMP_D}/out"
    local dev="disk=${DISK} partition=${PART}"
    local pt_start pt_end pt_size last pt_max code guid name new_size
    local old new change_info sector_size
    # Dump the original partition information and details to disk. This is
    # used in case something goes wrong and human interaction is required
    # to revert any changes.
    rqe sgd_info sgdisk "--info=${PART}" --print "${DISK}" >"${pt_info}" ||
        fail "${dev}: failed to dump original sgdisk info"
    RESTORE_HUMAN="${pt_info}"
    # Logical sector size is needed to convert the byte-valued FUDGE below.
    sector_size=$(awk '$0 ~ /^Logical sector size:.*bytes/ { print $4 }' \
        "$pt_info") && [ -n "$sector_size" ] || {
        sector_size=512
        error "WARN: did not find sector size, assuming 512"
    }
    debug 1 "$dev: original sgdisk info:"
    debugcat 1 "${pt_info}"
    # Pretend to move the backup GPT header to the end of the disk and dump
    # the resulting partition information. We use this info to determine if
    # we have to resize the partition.
    rqe sgd_pretend sgdisk --pretend --move-second-header \
        --print "${DISK}" >"${pt_pretend}" ||
        fail "${dev}: failed to dump pretend sgdisk info"
    debug 1 "$dev: pretend sgdisk info"
    debugcat 1 "${pt_pretend}"
    # Extract the partition data from the pretend dump
    # (everything after the "Number" column-header line).
    awk 'found { print } ; $1 == "Number" { found = 1 }' \
        "${pt_pretend}" >"${pt_data}" ||
        fail "${dev}: failed to parse pretend sgdisk info"
    # Get the start and end sectors of the partition to be grown
    pt_start=$(awk '$1 == '"${PART}"' { print $2 }' "${pt_data}") &&
        [ -n "${pt_start}" ] ||
        fail "${dev}: failed to get start sector"
    pt_end=$(awk '$1 == '"${PART}"' { print $3 }' "${pt_data}") &&
        [ -n "${pt_end}" ] ||
        fail "${dev}: failed to get end sector"
    pt_size="$((${pt_end} - ${pt_start}))"
    # Get the last usable sector
    last=$(awk '/last usable sector is/ { print $NF }' \
        "${pt_pretend}") && [ -n "${last}" ] ||
        fail "${dev}: failed to get last usable sector"
    # Find the minimal start sector that is >= pt_end
    # (i.e. the next partition's start, or 'last' when none follows).
    pt_max=$(awk '{ if ($2 >= pt_end && $2 < min) { min = $2 } } END \
        { print min }' min="${last}" pt_end="${pt_end}" \
        "${pt_data}") && [ -n "${pt_max}" ] ||
        fail "${dev}: failed to find max end sector"
    debug 1 "${dev}: pt_start=${pt_start} pt_end=${pt_end}" \
        "pt_size=${pt_size} pt_max=${pt_max} last=${last}"
    # Check if the partition can be grown
    [ "${pt_end}" -eq "${pt_max}" ] &&
        nochange "${dev}: size=${pt_size}, it cannot be grown"
    # Skip growth smaller than FUDGE bytes (compared in sectors).
    [ "$((${pt_end} + ${FUDGE}/${sector_size}))" -gt "${pt_max}" ] &&
        nochange "${dev}: could only be grown by" \
        "$((${pt_max} - ${pt_end})) [fudge=$((${FUDGE}/$sector_size))]"
    # The partition can be grown if we made it here. Get some more info
    # about it so we can do it properly.
    # FIXME: Do we care about the attribute flags?
    code=$(awk '/^Partition GUID code:/ { print $4 }' "${pt_info}")
    guid=$(awk '/^Partition unique GUID:/ { print $4 }' "${pt_info}")
    name=$(awk '/^Partition name:/ { gsub(/'"'"'/, "") ; \
        if (NF >= 3) print substr($0, index($0, $3)) }' "${pt_info}")
    [ -n "${code}" -a -n "${guid}" ] ||
        fail "${dev}: failed to parse sgdisk details"
    debug 1 "${dev}: code=${code} guid=${guid} name='${name}'"
    local wouldrun=""
    [ "$DRY_RUN" -ne 0 ] && wouldrun="would-run"
    # Calculate the new size of the partition
    new_size=$((${pt_max} - ${pt_start}))
    old="old: size=${pt_size},end=${pt_end}"
    new="new: size=${new_size},end=${pt_max}"
    change_info="${dev}: start=${pt_start} ${old} ${new}"
    # Backup the current partition table, we're about to modify it
    rq sgd_backup $wouldrun sgdisk "--backup=${GPT_BACKUP}" "${DISK}" ||
        fail "${dev}: failed to backup the partition table"
    # Modify the partition table. We do it all in one go (the order is
    # important!):
    # - move the GPT backup header to the end of the disk
    # - delete the partition
    # - recreate the partition with the new size
    # - set the partition code
    # - set the partition GUID
    # - set the partition name
    rq sgdisk_mod $wouldrun sgdisk --move-second-header "--delete=${PART}" \
        "--new=${PART}:${pt_start}:${pt_max}" \
        "--typecode=${PART}:${code}" \
        "--partition-guid=${PART}:${guid}" \
        "--change-name=${PART}:${name}" "${DISK}" &&
    rq pt_update $wouldrun pt_update "$DISK" "$PART" || {
        RESTORE_FUNC=gpt_restore
        fail "${dev}: failed to repartition"
    }
    # Dry run
    [ "${DRY_RUN}" -ne 0 ] && change "${change_info}"
    changed "${change_info}"
}
# kver_to_num: encode a kernel release string as one integer in _RET.
# "X.Y.Z-anything" -> X*1000000 + Y*1000 + Z; a missing micro part is 0.
kver_to_num() {
    local version="$1" major="" minor="" micro="0"
    # Strip any "-flavor" suffix (e.g. "3.8.0-19-generic" -> "3.8.0").
    version=${version%%-*}
    major=${version%%.*}
    minor=${version#${major}.}
    minor=${minor%%.*}
    micro=${version#${major}.${minor}.}
    # When there is no micro component the stripping above is a no-op,
    # leaving micro equal to the whole version string.
    [ "$version" = "$micro" ] && micro=0
    _RET=$((major * 1000 * 1000 + minor * 1000 + micro))
}
# kver_cmp: compare two kernel version strings with a test(1) operator.
# Usage: kver_cmp VER1 OP VER2, e.g. kver_cmp "$kver" -ge 3.8.0.
kver_cmp() {
    local oper="$2" left="" right=""
    kver_to_num "$1"
    left="$_RET"
    kver_to_num "$3"
    right="$_RET"
    [ "$left" $oper "$right" ]
}
# rq(label, command...): run a command quietly, capturing its output to
# "$TEMP_D/label.err"; the capture is replayed on stderr only on failure.
# A leading "would-run" argument (dry run) logs the command but skips it.
# The dynamically scoped $_capture ("erronly", set by rqe) selects whether
# only stderr or both streams are captured.
rq() {
    local tag="$1" rc="" errfile=""
    errfile="$TEMP_D/$tag.err"
    shift
    local mode="running"
    if [ "$1" = "would-run" ]; then
        mode="would-run"
        shift
    fi
    # Build a printable command line, quoting any argument containing
    # a space or an embedded ' "' sequence.
    local printable="" arg=""
    for arg in "$@"; do
        if [ "${arg#* }" != "$arg" -o "${arg#* \"}" != "$arg" ]; then
            arg="'$arg'"
        fi
        printable="$printable $arg"
    done
    printable=${printable# }
    debug 2 "$mode[$tag][$_capture]" "$printable"
    # Dry-run mode: report only, execute nothing.
    [ "$mode" = "would-run" ] && return 0
    if [ "${_capture}" = "erronly" ]; then
        "$@" 2>"$errfile"
        rc=$?
    else
        "$@" >"$errfile" 2>&1
        rc=$?
    fi
    if [ $rc -ne 0 ]; then
        error "failed [$tag:$rc]" "$@"
        cat "$errfile" 1>&2
    fi
    return $rc
}
# rqe(label, command...): like rq, but capture only the command's stderr,
# leaving stdout attached to the caller (so it can be redirected).
# Works via dynamic scoping: this local _capture is visible inside rq.
rqe() {
    local _capture="erronly"
    rq "$@"
}
# verify_ptupdate: decide whether in-place partition table updates
# (partx --update) can be used.  INPUT is one of off|on|auto|force.
# Sets _RET to "true" or "false"; returns non-zero on a hard failure
# ('on' requested but unsupported, or unknown input).
verify_ptupdate() {
    local mode="$1" blocker="" why="" kernel=""
    # 'off' needs no capability probing.
    if [ "$mode" = "off" ]; then
        _RET="false"
        return 0
    fi
    # partx must exist and its usage text must mention --update.
    if command -v partx >/dev/null 2>&1; then
        local help_out="" help_rc=0
        help_out=$(partx --help 2>&1)
        help_rc=$?
        if [ $help_rc -ne 0 ]; then
            why="'partx --help' returned $help_rc. assuming it is old."
            blocker="off"
        elif ! echo "$help_out" | grep -q -- --update; then
            why="partx has no '--update' flag in usage."
            blocker="off"
        fi
    else
        why="no 'partx' command"
        blocker="off"
    fi
    # Online updates require a Linux kernel...
    if [ -z "$blocker" ] && [ "$(uname)" != "Linux" ]; then
        why="Kernel is not Linux per uname."
        blocker="off"
    fi
    # ...of version 3.8.0 or newer.
    if [ -z "$blocker" ]; then
        kernel=$(uname -r) || debug 1 "uname -r failed!"
        if ! kver_cmp "${kernel-0.0.0}" -ge 3.8.0; then
            why="Kernel '$kernel' < 3.8.0."
            blocker="off"
        fi
    fi
    # All checks passed: updates are possible.
    if [ -z "$blocker" ]; then
        _RET="true"
        return 0
    fi
    # Something blocks updates; honor the requested mode.
    case "$mode" in
        on) error "$why"; return 1;;
        auto)
            _RET="false"
            debug 1 "partition update disabled: $why"
            return 0;;
        force)
            _RET="true"
            error "WARNING: ptupdate forced on even though: $why"
            return 0;;
    esac
    error "unknown input '$mode'"
    return 1
}
# pt_update: notify the kernel of a changed partition via 'partx --update'.
# No-op when updates are disabled (UPDATE, defaulting to $PT_UPDATE, is
# "false") or when DEV is not a block device (partx cannot act on files).
pt_update() {
    local dev="$1" part="$2" update="${3:-$PT_UPDATE}"
    # $update is the literal string "true" or "false", run as a command.
    $update || return 0
    # partx only works on block devices (do not run on file)
    [ -b "$dev" ] || return 0
    partx --update "$part" "$dev"
}
# has_cmd NAME: succeed when NAME resolves to a runnable command.
has_cmd() {
    command -v "$1" >/dev/null 2>&1
}
# Dispatch target for get_resizer: resize a gpt label via sgdisk.
resize_sgdisk_gpt() {
    resize_sgdisk gpt
}
# Dispatch target for get_resizer: sgdisk cannot edit dos/MBR labels,
# so this combination is always a hard failure.
resize_sgdisk_dos() {
    fail "unable to resize dos label with sgdisk"
}
# Dispatch target for get_resizer: resize a gpt label via sfdisk.
resize_sfdisk_gpt() {
    resize_sfdisk gpt
}
# Dispatch target for get_resizer: resize a dos/MBR label via sfdisk.
resize_sfdisk_dos() {
    resize_sfdisk dos
}
# get_table_format: detect the partition table type on DISK.
# Sets _RET to "dos" or "gpt"; defaults to "dos" when detection fails.
get_table_format() {
    local label="" disk="$1"
    # Preferred path: ask blkid for the partition-table type directly.
    if has_cmd blkid && label=$(blkid -o value -s PTTYPE "$disk") &&
        [ "$label" = "dos" -o "$label" = "gpt" ]; then
        _RET="$label"
        return
    fi
    _RET="dos"
    # Old sfdisk: partition 1 of type 'ee' marks a protective MBR (gpt).
    if [ ${SFDISK_VERSION} -lt ${SFDISK_2_26} ] &&
        label=$(sfdisk --id --force "$disk" 1 2>/dev/null); then
        case "$label" in
            ee) _RET="gpt";;
            *) _RET="dos";;
        esac
        return
    # New sfdisk: parse the 'Disklabel type' line of --list output.
    elif label=$(LANG=C sfdisk --list "$disk"); then
        label=$(echo "$label" | sed -e '/Disklabel type/!d' -e 's/.*: //')
        case "$label" in
            gpt|dos) _RET="$label";;
            *) error "WARN: unknown label $label";;
        esac
    fi
}
# get_resizer: pick the resize implementation for table FORMAT
# ("dos"|"gpt"), optionally forced by USER ("sfdisk"|"sgdisk"|"auto").
# Sets _RET to the name of the function to invoke; returns 1 when no
# usable tool exists.
get_resizer() {
    local format="$1" user=${2:-"auto"}
    # An explicit user choice wins outright.
    if [ "$user" = "sgdisk" ]; then
        _RET="resize_sgdisk_$format"
        return
    elif [ "$user" = "sfdisk" ]; then
        _RET="resize_sfdisk_$format"
        return
    elif [ "$user" != "auto" ]; then
        error "unexpected input: '$user'"
    fi
    # dos labels are always handled by sfdisk.
    if [ "$format" = "dos" ]; then
        _RET="resize_sfdisk_dos"
        return 0
    fi
    if [ "${SFDISK_VERSION}" -ge ${SFDISK_V_WORKING_GPT} ]; then
        # sfdisk 2.26.2 works for resize but loses type (LP: #1474090)
        _RET="resize_sfdisk_gpt"
    elif has_cmd sgdisk; then
        _RET="resize_sgdisk_$format"
    else
        error "no tools available to resize disk with '$format'"
        return 1
    fi
    return 0
}
pt_update="auto"
resizer=${GROWPART_RESIZER:-"auto"}

# --- command line parsing -------------------------------------------------
while [ $# -ne 0 ]; do
    cur=${1}
    next=${2}
    case "$cur" in
        -h|--help)
            Usage
            exit 0
            ;;
        --fudge)
            FUDGE=${next}
            shift
            ;;
        -N|--dry-run)
            DRY_RUN=1
            ;;
        -u|--update|--update=*)
            # accept both "--update=VAL" and "--update VAL"
            if [ "${cur#--update=}" != "$cur" ]; then
                next="${cur#--update=}"
            else
                shift
            fi
            case "$next" in
                off|auto|force|on) pt_update=$next;;
                *) fail "unknown --update option: $next";;
            esac
            ;;
        -v|--verbose)
            VERBOSITY=$(($VERBOSITY+1))
            ;;
        --)
            shift
            break
            ;;
        -*)
            fail "unknown option ${cur}"
            ;;
        *)
            # positional arguments: first DISK, then PART
            if [ -z "${DISK}" ]; then
                DISK=${cur}
            else
                [ -z "${PART}" ] || fail "confused by arg ${cur}"
                PART=${cur}
            fi
            ;;
    esac
    shift
done

[ -n "${DISK}" ] || bad_Usage "must supply disk and partition-number"
[ -n "${PART}" ] || bad_Usage "must supply partition-number"

has_cmd "sfdisk" || fail "sfdisk not found"
get_sfdisk_version || fail

[ -e "${DISK}" ] || fail "${DISK}: does not exist"

[ "${PART#*[!0-9]}" = "${PART}" ] || fail "partition-number must be a number"

verify_ptupdate "$pt_update" || fail
PT_UPDATE=$_RET

debug 1 "update-partition set to $PT_UPDATE"

mktemp_d && TEMP_D="${_RET}" || fail "failed to make temp dir"
trap cleanup 0 # EXIT - some shells may not like 'EXIT' but are ok with 0

# Detect the partition table format (dos or gpt) and pick a matching resizer.
get_table_format "$DISK" || fail
format=$_RET
get_resizer "$format" "$resizer" ||
    fail "failed to get a resizer for format '$format'"
# BUGFIX: the failure message previously interpolated the undefined
# variable $id instead of the detected $format.
resizer=$_RET

debug 1 "resizing $PART on $DISK using $resizer"
"$resizer"

# vi: ts=4 noexpandtab

View File

@@ -1,67 +0,0 @@
# /etc/inputrc - global inputrc for libreadline
# See readline(3readline) and `info rluserman' for more information.
# Be 8 bit clean.
set input-meta on
set output-meta on
# To allow the use of 8-bit characters like the German umlauts, uncomment
# the line below. However, this makes the Meta key not work as a Meta key,
# which is annoying to those who don't need to type 8-bit characters.
# set convert-meta off
# try to enable the application keypad when it is called. Some systems
# need this to enable the arrow keys.
# set enable-keypad on
# see /usr/share/doc/bash/inputrc.arrows for other codes of arrow keys
# do not bell on tab-completion
# set bell-style none
# set bell-style visible
# some defaults / modifications for the emacs mode
$if mode=emacs
# allow the use of the Home/End keys
"\e[1~": beginning-of-line
"\e[4~": end-of-line
# allow the use of the Delete/Insert keys
"\e[3~": delete-char
"\e[2~": quoted-insert
# mappings for "page up" and "page down" to step to the beginning/end
# of the history
# "\e[5~": beginning-of-history
# "\e[6~": end-of-history
# alternate mappings for "page up" and "page down" to search the history
# "\e[5~": history-search-backward
# "\e[6~": history-search-forward
# mappings for Ctrl-left-arrow and Ctrl-right-arrow for word moving
"\e[1;5C": forward-word
"\e[1;5D": backward-word
"\e[5C": forward-word
"\e[5D": backward-word
"\e\e[C": forward-word
"\e\e[D": backward-word
$if term=rxvt
"\e[7~": beginning-of-line
"\e[8~": end-of-line
"\eOc": forward-word
"\eOd": backward-word
$endif
# for non RH/Debian xterm, can't hurt for RH/Debian xterm
# "\eOH": beginning-of-line
# "\eOF": end-of-line
# for freebsd console
# "\e[H": beginning-of-line
# "\e[F": end-of-line
$endif

View File

@@ -1,3 +0,0 @@
# ACPI event-handler image: reacts to lid-switch events.
FROM rancher/os-base
# Register the lid event rule with acpid.
COPY lid /etc/acpi/events/
# Script invoked by acpid when the lid event fires.
COPY suspend.sh /etc/acpi/suspend.sh

View File

@@ -1,2 +0,0 @@
# acpid rule: run the suspend script for lid button/switch events;
# %e expands to the full event string.
event=button/lid
action=/etc/acpi/suspend.sh %e

View File

@@ -1,4 +0,0 @@
#!/bin/sh
# ACPI lid handler: suspend to RAM when the lid closes.
# acpid passes the event string as arguments; $3 is "open" or "close".
if [ "$3" = "close" ]; then
    # printf replaces the non-portable 'echo -n' — under a POSIX /bin/sh
    # 'echo -n mem' may literally emit "-n mem", corrupting the sysfs write.
    printf "mem" > /sys/power/state
fi

View File

@@ -1,4 +0,0 @@
# Auto-format image: formats blank state disks on first boot.
FROM rancher/os-base
COPY auto-format.sh /usr/sbin/
# Reference od(1) dump of 1MiB of zeros, used to recognize an empty disk.
COPY od-1m0 /
ENTRYPOINT ["/usr/sbin/auto-format.sh"]

View File

@@ -1,63 +0,0 @@
#!/bin/bash
# Auto-format the first suitable block device listed in ${AUTOFORMAT}
# (space-separated device paths).  A device qualifies when it carries the
# boot2docker magic header or when its first 1MiB is all zeros.
set -ex
MAGIC=${MAGIC:-"boot2docker, please format-me"}
# Word-split ${AUTOFORMAT} into an array of candidate devices.
DEVS=(${AUTOFORMAT})
for dev in ${DEVS[@]}; do
    if [ -b "${dev}" ]; then
        # Test for our magic string (it means that the disk was made by ./boot2docker init)
        HEADER=`dd if=${dev} bs=1 count=${#MAGIC} 2>/dev/null`
        if [ "$HEADER" = "$MAGIC" ]; then
            # save the preload userdata.tar file
            dd if=${dev} of=/userdata.tar bs=1 count=8192
        elif ! od -A d -N 1048576 ${dev} | head -n 3 | diff ./od-1m0 - >/dev/null 2>&1; then
            # do not auto-format if the disk does not begin with 1MB filled with 00
            continue
        fi
        if [ -e "/userdata.tar" ]; then
            # boot2docker-style disk: format it, then seed cloud-config with
            # the ssh keys carried inside userdata.tar.
            mkfs.ext4 -L B2D_STATE ${dev}
            mkdir -p /mnt/new-root
            mount -t ext4 ${dev} /mnt/new-root
            pushd /mnt/new-root
            mkdir -p ./var/lib/rancher/conf/cloud-config.d
            # echo collapses tar's verbose listing onto a single log line
            echo $(tar -xvf /userdata.tar)
            AUTHORIZED_KEY1=$(cat ./.ssh/authorized_keys)
            AUTHORIZED_KEY2=$(cat ./.ssh/authorized_keys2)
            tee ./var/lib/rancher/conf/cloud-config.d/machine.yml << EOF
#cloud-config
rancher:
  network:
    interfaces:
      eth0:
        dhcp: true
      eth1:
        dhcp: true
      lo:
        address: 127.0.0.1/8
ssh_authorized_keys:
- ${AUTHORIZED_KEY1}
- ${AUTHORIZED_KEY2}
users:
- name: docker
  ssh_authorized_keys:
  - ${AUTHORIZED_KEY1}
  - ${AUTHORIZED_KEY2}
EOF
            popd
            umount /mnt/new-root
        else
            # Blank disk: plain format, labeled for RancherOS state.
            mkfs.ext4 -L RANCHER_STATE ${dev}
        fi
        # do not check another device
        break
    fi
done

View File

@@ -1,3 +0,0 @@
0000000 000000 000000 000000 000000 000000 000000 000000 000000
*
1048576

View File

@@ -1,3 +0,0 @@
# Cloud-init datasource image: mounts the config drive and saves config.
FROM rancher/os-base
COPY cloud-init.sh /
CMD ["/cloud-init.sh"]

View File

@@ -1,15 +0,0 @@
#!/bin/bash
# Mount the "config-2" cloud-config datasource and persist its settings
# via cloud-init-save.
set -x -e
MOUNT_POINT=/media/config-2
# Resolve the device labeled config-2 (config-drive style datasource).
CONFIG_DEV=$(ros dev "LABEL=config-2")
mkdir -p ${MOUNT_POINT}
if [ -e "${CONFIG_DEV}" ]; then
    mount -t iso9660,vfat ${CONFIG_DEV} ${MOUNT_POINT}
else
    # Fall back to a 9p virtio share named config-2 (e.g. under QEMU);
    # best-effort — absence of the share is not an error.
    mount -t 9p -o trans=virtio,version=9p2000.L config-2 ${MOUNT_POINT} 2>/dev/null || true
fi
cloud-init-save -network=${CLOUD_INIT_NETWORK:-true}

View File

@@ -1,12 +0,0 @@
# Console image: interactive login environment for RancherOS.
FROM rancher/os-base
COPY update-ssh-keys /usr/sbin/
# Release info file generated by the build script at image build time.
COPY build/lsb-release /etc/
# Unlock the rancher/docker accounts (for key-based login), keep idle ssh
# sessions alive, and grant both users passwordless sudo.
RUN sed -i 's/rancher:!/rancher:*/g' /etc/shadow && \
    sed -i 's/docker:!/docker:*/g' /etc/shadow && \
    sed -i 's/#ClientAliveInterval 0/ClientAliveInterval 180/g' /etc/ssh/sshd_config && \
    echo '## allow password less for rancher user' >> /etc/sudoers && \
    echo 'rancher ALL=(ALL) NOPASSWD: ALL' >> /etc/sudoers && \
    echo '## allow password less for docker user' >> /etc/sudoers && \
    echo 'docker ALL=(ALL) NOPASSWD: ALL' >> /etc/sudoers
COPY prompt.sh /etc/profile.d/
CMD ["/usr/sbin/console.sh"]

View File

@@ -1,15 +0,0 @@
#!/bin/bash
# Generate build/lsb-release describing this build.
# Requires VERSION in the environment; DISTRIB_ID is optional
# (empty if unset).
set -e
VERSION=${VERSION:?"VERSION not set"}
# Operate relative to this script's own directory.
cd $(dirname $0)
rm -rf ./build
mkdir -p ./build
cat > ./build/lsb-release << EOF
DISTRIB_ID=${DISTRIB_ID}
DISTRIB_RELEASE=${VERSION}
DISTRIB_DESCRIPTION="${DISTRIB_ID} ${VERSION}"
EOF

View File

@@ -1 +0,0 @@
# Default interactive shell prompt: [user@host cwd]$
export PS1='[\u@\h \W]\$ '

View File

@@ -1,20 +0,0 @@
#!/bin/bash
# update-ssh-keys USERNAME KEY
# Idempotently append an ssh public key to USERNAME's authorized_keys,
# creating ~/.ssh with correct permissions when needed.

USERNAME=$1
# Anchor on "name:" — a bare '^$USERNAME' prefix-matches, so e.g.
# "docker" would also match a "docker2" passwd entry.
HOME_DIR=$(grep "^${USERNAME}:" /etc/passwd | cut -f6 -d:)

if [ ! -d "$HOME_DIR/.ssh" ]; then
    mkdir -p "$HOME_DIR/.ssh"
    chmod 0700 "$HOME_DIR/.ssh"
fi

if [ ! -e "$HOME_DIR/.ssh/authorized_keys" ]; then
    touch "$HOME_DIR/.ssh/authorized_keys"
    chmod 0600 "$HOME_DIR/.ssh/authorized_keys"
fi

# -F: treat the key as a literal string, not a regex (keys contain
# regex metacharacters such as '+' and '/').
if ! grep -qF "$2" "$HOME_DIR/.ssh/authorized_keys"; then
    echo "$2" >> "$HOME_DIR/.ssh/authorized_keys"
fi

chown -R "$USERNAME" "$HOME_DIR/.ssh"

View File

@@ -1,4 +0,0 @@
# Image-preload service: loads docker image tarballs from a preload dir.
FROM rancher/os-base
# Point /usr/bin/docker at the user docker engine under /var/lib/rancher.
RUN ln -sf /var/lib/rancher/engine/docker /usr/bin/docker
COPY preload.sh /
CMD ["/preload.sh"]

View File

@@ -1,46 +0,0 @@
#!/bin/bash
# Preload docker images from tarballs in a preload directory.
# Usage: preload.sh [BASE_DIR]
set -e
# Base dir precedence: CLI argument > $PRELOAD_DIR > /mnt/preload.
BASE=${1:-${PRELOAD_DIR}}
BASE=${BASE:-/mnt/preload}
# Load into system-docker when SYSTEM_IMAGES=true, else the user docker.
if [ "${SYSTEM_IMAGES}" = "true" ]; then
    docker_bin=system-docker
else
    docker_bin=docker
fi
# should_load FILE: print "true" when FILE is a regular file that still
# needs loading — i.e. it is not itself a ".done" marker and it is newer
# than its "FILE.done" marker (or no marker exists).  Prints "false"
# otherwise.
should_load() {
    file=${1}
    # Marker files themselves are never loaded.
    if [[ ${file} =~ \.done$ ]]; then
        echo false
        return
    fi
    if [ -f ${file} ] && [[ ${file} -nt ${file}.done ]]; then
        echo true
    else
        echo false
    fi
}
if [ -d ${BASE} ]; then
    echo Preloading docker images from ${BASE}...
    for file in $(ls ${BASE}); do
        path=${BASE}/${file}
        loading=$(should_load ${path})
        if [ ${loading} == "true" ]; then
            # Build the load pipeline as a string; add a decompressor
            # stage based on the file extension.
            CAT="cat ${path}"
            if [[ ${file} =~ \.t?gz$ ]]; then CAT="${CAT} | gunzip"; fi
            if [[ ${file} =~ \.t?xz$ ]]; then CAT="${CAT} | unxz"; fi
            # Block until the docker daemon answers before loading.
            wait-for-docker
            CAT="${CAT} | ${docker_bin} load"
            echo loading from ${path}
            # Best-effort: a failed load or marker write must not abort
            # the loop (this script runs under 'set -e').
            eval ${CAT} || :
            # The .done marker records that this tarball was processed.
            touch ${path}.done || :
        fi
    done
    echo Done.
else
    echo Can not preload images from ${BASE}: not a dir or does not exist.
fi

View File

@@ -1,3 +0,0 @@
# State-preparation service image.
FROM rancher/os-base
COPY state.sh /usr/sbin/
CMD ["/usr/sbin/state.sh"]

View File

@@ -1,12 +0,0 @@
#!/bin/bash
# Prepare machine state at boot: optionally assemble RAID, then hand off
# to a user-supplied state script from cloud-config, if one is set.
set -x
# Assemble software RAID arrays when rancher.state.mdadm_scan is enabled.
if [ "$(ros config get rancher.state.mdadm_scan)" = "true" ]; then
    mdadm --assemble --scan
fi
# Write the configured script (possibly empty) and exec it when non-empty.
ros config get rancher.state.script > config.sh
if [ -s config.sh ]; then
    chmod +x config.sh
    exec ./config.sh
fi

View File

@@ -1,4 +0,0 @@
# Syslog service image running rsyslogd in the foreground.
FROM rancher/os-base
COPY syslog.sh /
# Drop the first 10 lines of the stock rsyslog.conf
# (presumably the default module/include preamble — verify against os-base).
RUN sed -i 1,10d /etc/rsyslog.conf
CMD ["/syslog.sh"]

View File

@@ -1,5 +0,0 @@
#!/bin/bash
# Run rsyslogd in the foreground as this container's main process.
# BUGFIX: the shebang was '#!bin/bash' (missing leading slash), which the
# kernel cannot resolve when the script is executed directly.
set -x -e
exec rsyslogd -n

View File

@@ -1,3 +0,0 @@
# Udev service image.
FROM rancher/os-base
COPY udev.sh /
CMD ["/udev.sh"]

Some files were not shown because too many files have changed in this diff Show More