Compare commits
436 Commits
v0.8.x
...
v1.0.x-eol
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
eaee6ef773 | ||
|
|
929bb5abce | ||
|
|
1205428d20 | ||
|
|
730b26e5e2 | ||
|
|
b30d6db3b0 | ||
|
|
b35d682cca | ||
|
|
79e7388d65 | ||
|
|
5b812cf815 | ||
|
|
ba4cbcd1d9 | ||
|
|
5b00d8ee7a | ||
|
|
1e2e950709 | ||
|
|
f4912b3ff9 | ||
|
|
6a09845340 | ||
|
|
c8915d646d | ||
|
|
67d932d9f8 | ||
|
|
5716be7a34 | ||
|
|
c6510571c5 | ||
|
|
7b0d400693 | ||
|
|
9c3321d4a3 | ||
|
|
c291763251 | ||
|
|
cc58b8c6b2 | ||
|
|
2719d8a8e4 | ||
|
|
d289b153a4 | ||
|
|
b630bc836b | ||
|
|
170df073e6 | ||
|
|
47320f9350 | ||
|
|
f9dd8945e6 | ||
|
|
63c75c68cb | ||
|
|
a3f942f03b | ||
|
|
d244043ce7 | ||
|
|
3de5a836c9 | ||
|
|
0ecdfff839 | ||
|
|
12409e7e20 | ||
|
|
732f1924e1 | ||
|
|
6dc7faf9b2 | ||
|
|
5ba2c76c71 | ||
|
|
a51d688f5f | ||
|
|
24f45e4eba | ||
|
|
4e6fff8ed1 | ||
|
|
d36d4555aa | ||
|
|
ad64c50cc1 | ||
|
|
cbfe50c5ee | ||
|
|
2cd3cb2442 | ||
|
|
32061238aa | ||
|
|
a40ac4ea03 | ||
|
|
75d384bbe8 | ||
|
|
4fcc3a880c | ||
|
|
424b7e5b9b | ||
|
|
c7b764693d | ||
|
|
1ef301c631 | ||
|
|
ce9ae25741 | ||
|
|
7cb46e1919 | ||
|
|
cbcdb2628c | ||
|
|
7fb9afe39c | ||
|
|
6f33622a12 | ||
|
|
765a7c3ed4 | ||
|
|
a04c0f3740 | ||
|
|
6fcc1e3967 | ||
|
|
d9d3c2b0d8 | ||
|
|
d859052453 | ||
|
|
180fe241d8 | ||
|
|
553fed3eea | ||
|
|
c94a683b87 | ||
|
|
45e422d01e | ||
|
|
fecbb9df2e | ||
|
|
53225f6e9b | ||
|
|
09569f68bd | ||
|
|
fd5c81978a | ||
|
|
2a7da35139 | ||
|
|
a0fcec674f | ||
|
|
a4e7036086 | ||
|
|
03f90fd748 | ||
|
|
4fc82b69ef | ||
|
|
a366336895 | ||
|
|
af965e9446 | ||
|
|
8327006f61 | ||
|
|
242844f084 | ||
|
|
420d17ad27 | ||
|
|
f36eb70e74 | ||
|
|
1048a4eead | ||
|
|
92f5dd3752 | ||
|
|
fca70ede1b | ||
|
|
8dc7fb9494 | ||
|
|
f45243eae9 | ||
|
|
0ccc5ad735 | ||
|
|
bf98a1ae3f | ||
|
|
bb20e96a98 | ||
|
|
43c620c4d8 | ||
|
|
47261eab01 | ||
|
|
3b4d73e106 | ||
|
|
33a60488cd | ||
|
|
7615c26f44 | ||
|
|
402af04b44 | ||
|
|
cbe24ca06b | ||
|
|
de8faafb72 | ||
|
|
c67eba4dbb | ||
|
|
671a78ac08 | ||
|
|
b733bde9cd | ||
|
|
204facc395 | ||
|
|
8289d4e1bb | ||
|
|
85436f675b | ||
|
|
1f045bc696 | ||
|
|
b4584a616a | ||
|
|
fd8a4df4a6 | ||
|
|
340bb42160 | ||
|
|
0d9fd52c42 | ||
|
|
c5d4cb91c3 | ||
|
|
5e4b5975a9 | ||
|
|
bc04aa99ed | ||
|
|
a8da4c7e78 | ||
|
|
adb2d5d697 | ||
|
|
0fb89736e4 | ||
|
|
f673138932 | ||
|
|
8a741c5d32 | ||
|
|
b1d9732f65 | ||
|
|
5078c80c36 | ||
|
|
305155f92a | ||
|
|
959cdd5ddd | ||
|
|
a281a84cca | ||
|
|
3b731ade43 | ||
|
|
cdb26fac99 | ||
|
|
45155f4e6a | ||
|
|
487610f6c5 | ||
|
|
b328ecaec3 | ||
|
|
e466ea667a | ||
|
|
260379d2b7 | ||
|
|
fb96c470a9 | ||
|
|
19e2c91f1b | ||
|
|
28c3181518 | ||
|
|
39e1339fb0 | ||
|
|
6c6d23b649 | ||
|
|
a2e3c9aa50 | ||
|
|
09bd518cd0 | ||
|
|
5dd92a610b | ||
|
|
2bbd3b375a | ||
|
|
ecd2853bdb | ||
|
|
af60ac5798 | ||
|
|
49e4315251 | ||
|
|
3c34f77616 | ||
|
|
9106a97f34 | ||
|
|
68b005bc50 | ||
|
|
94a4fe7778 | ||
|
|
96b8a83c35 | ||
|
|
59bd47a0e6 | ||
|
|
4c49c8fef5 | ||
|
|
75168c6d11 | ||
|
|
3978d93fca | ||
|
|
034073b8ab | ||
|
|
ed3f08f0f9 | ||
|
|
8d894ba396 | ||
|
|
cc133372a7 | ||
|
|
39922220b0 | ||
|
|
d789c9fba9 | ||
|
|
b5ca78c269 | ||
|
|
c539270c2a | ||
|
|
ff3db59776 | ||
|
|
922b23eb78 | ||
|
|
1e3cd14af7 | ||
|
|
3e68d3c92c | ||
|
|
853d27dffc | ||
|
|
284f029b19 | ||
|
|
437034cb48 | ||
|
|
06743261b9 | ||
|
|
c467aedcda | ||
|
|
f9cbc5ce34 | ||
|
|
172a4782df | ||
|
|
4ec338a9e1 | ||
|
|
ad1fb97378 | ||
|
|
51aff79c7e | ||
|
|
e37b7c5331 | ||
|
|
4551278b99 | ||
|
|
60ac78b816 | ||
|
|
e3268e2b62 | ||
|
|
26939ebb7e | ||
|
|
1eb6991798 | ||
|
|
d0bfdb444e | ||
|
|
6bbde90e0a | ||
|
|
d52b995450 | ||
|
|
f0b9928541 | ||
|
|
d11a92418d | ||
|
|
411810bb2d | ||
|
|
0f0c9b6149 | ||
|
|
7805c91f98 | ||
|
|
ef7c16d4d4 | ||
|
|
ff87df4231 | ||
|
|
cfd9e80ac6 | ||
|
|
7d4685dcdd | ||
|
|
272f3942f1 | ||
|
|
abaecf4e44 | ||
|
|
e054331f11 | ||
|
|
53fced8122 | ||
|
|
b033444d20 | ||
|
|
242d42fbb1 | ||
|
|
62c9096164 | ||
|
|
9962e4c232 | ||
|
|
d2544357dc | ||
|
|
b27fe9422c | ||
|
|
bd857716a3 | ||
|
|
85c8916f4e | ||
|
|
b466ac9d45 | ||
|
|
3aac1ad148 | ||
|
|
a692402798 | ||
|
|
6d5fc4c499 | ||
|
|
42e366a821 | ||
|
|
b217ad5732 | ||
|
|
4ae80319dd | ||
|
|
096d990b4e | ||
|
|
b395b93571 | ||
|
|
720b54f1fd | ||
|
|
bed064419b | ||
|
|
f0f990f08d | ||
|
|
b22bf7476c | ||
|
|
6520d8a1b9 | ||
|
|
021c3a0d8d | ||
|
|
1655c1963e | ||
|
|
bbb904b149 | ||
|
|
1889d95199 | ||
|
|
69f876b73e | ||
|
|
f7605990a5 | ||
|
|
4602ccc2cb | ||
|
|
4e16b1c9bd | ||
|
|
1e26b0b687 | ||
|
|
cf89c124a4 | ||
|
|
923e4629a5 | ||
|
|
6e5433df0d | ||
|
|
cca8fe280d | ||
|
|
e7ff8e65e9 | ||
|
|
070ba1a023 | ||
|
|
15eab4a4b0 | ||
|
|
d038f8fd7d | ||
|
|
b6847b40d0 | ||
|
|
aef937609e | ||
|
|
70ea28669a | ||
|
|
063f12dabe | ||
|
|
6249708038 | ||
|
|
1048c939ac | ||
|
|
fd9a9d2ad2 | ||
|
|
7530cb4374 | ||
|
|
e51a391cc5 | ||
|
|
187f5d27d0 | ||
|
|
6911b4c01d | ||
|
|
f242815b6d | ||
|
|
d6d891ced9 | ||
|
|
922fa42205 | ||
|
|
6605c3bbd4 | ||
|
|
73f12a9004 | ||
|
|
cdc3a189ad | ||
|
|
db096d6e76 | ||
|
|
189df7e911 | ||
|
|
a4bfa75289 | ||
|
|
bc12860e7e | ||
|
|
b573e0f378 | ||
|
|
1332d3d5e1 | ||
|
|
fc2d4f5c71 | ||
|
|
2cd67d9bbe | ||
|
|
32714dbde2 | ||
|
|
5941a6ac4b | ||
|
|
5ceb1dd9ee | ||
|
|
6a07450503 | ||
|
|
ff2d8e0613 | ||
|
|
a21c414ce1 | ||
|
|
56ac7f1f87 | ||
|
|
d86975ba57 | ||
|
|
acc60f63e2 | ||
|
|
6ae1a92da7 | ||
|
|
4eee06e578 | ||
|
|
e05f30c4fc | ||
|
|
9e0302fd85 | ||
|
|
7b2b3a3fb8 | ||
|
|
2a6e06fdc6 | ||
|
|
f38a4eadda | ||
|
|
3f0e76e866 | ||
|
|
2c1e20662c | ||
|
|
2e9b86757b | ||
|
|
3e331c8ac5 | ||
|
|
73617b8a5a | ||
|
|
73ff97e465 | ||
|
|
79719e74c8 | ||
|
|
8491a7cedd | ||
|
|
a0061beedf | ||
|
|
8ecd6a45a1 | ||
|
|
a90bce0d23 | ||
|
|
4291cbfc86 | ||
|
|
544695d670 | ||
|
|
f50e9fc8a5 | ||
|
|
75f8c5c4ff | ||
|
|
738cfefdbd | ||
|
|
4a518ebfc9 | ||
|
|
096c281ded | ||
|
|
7b421d02b0 | ||
|
|
7a51d3695c | ||
|
|
c04357c32d | ||
|
|
3186728de5 | ||
|
|
6a3624aea6 | ||
|
|
1067ffec78 | ||
|
|
4dae68c51c | ||
|
|
7e30bb9983 | ||
|
|
3900bc385e | ||
|
|
8afeca5a00 | ||
|
|
34a0f29b7f | ||
|
|
223a7b49eb | ||
|
|
f28d658cda | ||
|
|
0816893d97 | ||
|
|
2cbd384229 | ||
|
|
a677b753bc | ||
|
|
5de796c05a | ||
|
|
4997104f70 | ||
|
|
19a595773b | ||
|
|
c98844ec45 | ||
|
|
fde6789d4a | ||
|
|
3fefb5f888 | ||
|
|
5a961b8887 | ||
|
|
79a7e59adb | ||
|
|
84926cb463 | ||
|
|
158517eab5 | ||
|
|
2516850976 | ||
|
|
ac5cb304d6 | ||
|
|
d35e0e05d8 | ||
|
|
fdc16672d5 | ||
|
|
1356e609b3 | ||
|
|
4410480fd6 | ||
|
|
bc3f2a195d | ||
|
|
261be61cc0 | ||
|
|
088249d751 | ||
|
|
babf6ddb48 | ||
|
|
8b0be9cd2b | ||
|
|
748be0ad66 | ||
|
|
41e02e6f64 | ||
|
|
d4ae014f76 | ||
|
|
ecdd081c27 | ||
|
|
8f69c1faff | ||
|
|
02a47b2edc | ||
|
|
2f28a00e02 | ||
|
|
da5cab621a | ||
|
|
74136bf8e6 | ||
|
|
53c88bc505 | ||
|
|
5dfcd31b54 | ||
|
|
59a752c306 | ||
|
|
77759afcaa | ||
|
|
ca0d475c83 | ||
|
|
5ea76f704a | ||
|
|
b2cbd62a8d | ||
|
|
8f4b2bc458 | ||
|
|
571597dde3 | ||
|
|
63f8277ecb | ||
|
|
15699a253c | ||
|
|
060390c160 | ||
|
|
fb7a5745c2 | ||
|
|
553e21f919 | ||
|
|
af5935d3f2 | ||
|
|
2a8d8fa891 | ||
|
|
19fcea6264 | ||
|
|
ab3c508a39 | ||
|
|
6a18025fe5 | ||
|
|
2fb3c6fe3e | ||
|
|
693ca3179b | ||
|
|
c3a501d33d | ||
|
|
daed587841 | ||
|
|
ff4b315d0c | ||
|
|
e5f90c5ac5 | ||
|
|
ff2d445039 | ||
|
|
26c2f3cc69 | ||
|
|
5b7bb8c81f | ||
|
|
7e71e4c876 | ||
|
|
76054f1152 | ||
|
|
19157702b3 | ||
|
|
18e0ea81d9 | ||
|
|
a66463285b | ||
|
|
eb0c4b2982 | ||
|
|
1aa8521cf8 | ||
|
|
bcc1aed724 | ||
|
|
2de5daffe9 | ||
|
|
235857d021 | ||
|
|
1e5baa57da | ||
|
|
c62c05773c | ||
|
|
af6888020d | ||
|
|
4c2d21275a | ||
|
|
73980f9c73 | ||
|
|
368a13ed13 | ||
|
|
f6ce1f0685 | ||
|
|
4981e76755 | ||
|
|
8babf66dc4 | ||
|
|
8ee82f263d | ||
|
|
2d92956c82 | ||
|
|
4cd73c111e | ||
|
|
355859e707 | ||
|
|
2cd6ec4db6 | ||
|
|
dc540a0cf0 | ||
|
|
93cd0877dd | ||
|
|
8d941162d8 | ||
|
|
23e51e3b8d | ||
|
|
27f11ec6c2 | ||
|
|
63c3d57993 | ||
|
|
8080d01ac9 | ||
|
|
d1d0c30924 | ||
|
|
a8ade0f873 | ||
|
|
6611eb1134 | ||
|
|
e80342d369 | ||
|
|
23edbd05e8 | ||
|
|
299d59b5fc | ||
|
|
90963f8f45 | ||
|
|
9afc3da083 | ||
|
|
be9874d2f4 | ||
|
|
8a4fa93202 | ||
|
|
78c08c4dd9 | ||
|
|
d65f9518df | ||
|
|
64949bb888 | ||
|
|
17b3589782 | ||
|
|
00af8545d6 | ||
|
|
4126cdbba7 | ||
|
|
e4c2271c6b | ||
|
|
b5fdd63a85 | ||
|
|
0779e13d46 | ||
|
|
5dbb0f2a28 | ||
|
|
8dc2050fd8 | ||
|
|
cdd682429e | ||
|
|
51de09e16e | ||
|
|
42248daf60 | ||
|
|
6d606cc52b | ||
|
|
b2e0510697 | ||
|
|
d26d20d730 | ||
|
|
ff98f27407 | ||
|
|
df32dfdc70 | ||
|
|
10a4c59385 | ||
|
|
78051c2814 | ||
|
|
391082fa50 | ||
|
|
23a4d8ec76 | ||
|
|
8fa2d80325 | ||
|
|
be2c4044ce | ||
|
|
4f177ee605 | ||
|
|
e2ed97648a | ||
|
|
9b793b5d7c | ||
|
|
d9ad645f6d | ||
|
|
f096f552d1 | ||
|
|
f94704a803 | ||
|
|
e43fb097c8 |
@@ -12,6 +12,6 @@ tests/integration/.tox
|
||||
*/*/*/*.pyc
|
||||
*/*/*/__pycache__
|
||||
.trash-cache
|
||||
.dapper
|
||||
#.dapper
|
||||
vendor/*/*/*/.git
|
||||
tmp
|
||||
|
||||
@@ -1,8 +1,13 @@
|
||||
FROM ubuntu:16.04
|
||||
# FROM arm64=aarch64/ubuntu:16.04 arm=armhf/ubuntu:16.04
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends \
|
||||
# get the apt-cacher proxy set
|
||||
ARG APTPROXY=
|
||||
|
||||
RUN echo "Acquire::http { Proxy \"$APTPROXY\"; };" >> /etc/apt/apt.conf.d/01proxy \
|
||||
&& cat /etc/apt/apt.conf.d/01proxy \
|
||||
&& apt-get update \
|
||||
&& apt-get install -y --no-install-recommends \
|
||||
build-essential \
|
||||
ca-certificates \
|
||||
cpio \
|
||||
@@ -19,6 +24,7 @@ RUN apt-get update && \
|
||||
libselinux1-dev \
|
||||
locales \
|
||||
module-init-tools \
|
||||
mtools \
|
||||
openssh-client \
|
||||
pkg-config \
|
||||
qemu \
|
||||
@@ -28,11 +34,12 @@ RUN apt-get update && \
|
||||
syslinux-common \
|
||||
vim \
|
||||
wget \
|
||||
xorriso
|
||||
xorriso \
|
||||
telnet
|
||||
|
||||
########## Dapper Configuration #####################
|
||||
|
||||
ENV DAPPER_ENV VERSION DEV_BUILD RUNTEST
|
||||
ENV DAPPER_ENV VERSION DEV_BUILD RUNTEST DEBUG APTPROXY ENGINE_REGISTRY_MIRROR INTEGRATION_TESTS
|
||||
ENV DAPPER_DOCKER_SOCKET true
|
||||
ENV DAPPER_SOURCE /go/src/github.com/rancher/os
|
||||
ENV DAPPER_OUTPUT ./bin ./dist ./build/initrd ./build/kernel
|
||||
@@ -56,9 +63,9 @@ ARG DOCKER_BUILD_VERSION=1.10.3
|
||||
ARG DOCKER_BUILD_PATCH_VERSION=v${DOCKER_BUILD_VERSION}-ros1
|
||||
ARG SELINUX_POLICY_URL=https://github.com/rancher/refpolicy/releases/download/v0.0.3/policy.29
|
||||
|
||||
ARG KERNEL_VERSION_amd64=4.9.12-rancher
|
||||
ARG KERNEL_VERSION_amd64=4.9.78-rancher
|
||||
ARG KERNEL_URL_amd64=https://github.com/rancher/os-kernel/releases/download/v${KERNEL_VERSION_amd64}/linux-${KERNEL_VERSION_amd64}-x86.tar.gz
|
||||
ARG KERNEL_URL_arm64=https://github.com/imikushin/os-kernel/releases/download/Estuary-4.4.0-arm64.8/linux-4.4.0-rancher-arm64.tar.gz
|
||||
#ARG KERNEL_URL_arm64=https://github.com/imikushin/os-kernel/releases/download/Estuary-4.4.0-arm64.8/linux-4.4.0-rancher-arm64.tar.gz
|
||||
|
||||
ARG DOCKER_URL_amd64=https://get.docker.com/builds/Linux/x86_64/docker-${DOCKER_VERSION}.tgz
|
||||
ARG DOCKER_URL_arm=https://github.com/rancher/docker/releases/download/${DOCKER_PATCH_VERSION}/docker-${DOCKER_VERSION}_arm.tgz
|
||||
@@ -74,9 +81,9 @@ ARG OS_SERVICES_REPO=https://raw.githubusercontent.com/${OS_REPO}/os-services
|
||||
ARG IMAGE_NAME=${OS_REPO}/os
|
||||
ARG DFS_IMAGE=${OS_REPO}/docker:v${DOCKER_VERSION}-2
|
||||
|
||||
ARG OS_BASE_URL_amd64=https://github.com/rancher/os-base/releases/download/v2016.08.1-2/os-base_amd64.tar.xz
|
||||
ARG OS_BASE_URL_arm64=https://github.com/rancher/os-base/releases/download/v2016.08.1-2/os-base_arm64.tar.xz
|
||||
ARG OS_BASE_URL_arm=https://github.com/rancher/os-base/releases/download/v2016.08.1-3/os-base_arm.tar.xz
|
||||
ARG OS_BASE_URL_amd64=https://github.com/rancher/os-base/releases/download/v2017.02.5-1/os-base_amd64.tar.xz
|
||||
ARG OS_BASE_URL_arm64=https://github.com/rancher/os-base/releases/download/v2017.02.5-1/os-base_arm64.tar.xz
|
||||
ARG OS_BASE_URL_arm=https://github.com/rancher/os-base/releases/download/v2017.02.5-1/os-base_arm.tar.xz
|
||||
######################################################
|
||||
|
||||
# Set up environment and export all ARGS as ENV
|
||||
|
||||
38
Makefile
Normal file → Executable file
38
Makefile
Normal file → Executable file
@@ -1,4 +1,4 @@
|
||||
TARGETS := $(shell ls scripts | grep -vE 'clean|run|help|docs')
|
||||
TARGETS := $(shell ls scripts | grep -vE 'clean|run|help|docs|release|build-moby|run-moby')
|
||||
|
||||
.dapper:
|
||||
@echo Downloading dapper
|
||||
@@ -28,14 +28,50 @@ run: build/initrd/.id .dapper
|
||||
docs:
|
||||
./scripts/docs
|
||||
|
||||
build-moby:
|
||||
./scripts/build-moby
|
||||
|
||||
run-moby:
|
||||
./scripts/run-moby
|
||||
|
||||
shell-bind: .dapper
|
||||
./.dapper -m bind -s
|
||||
|
||||
clean:
|
||||
@./scripts/clean
|
||||
|
||||
release: .dapper release-build qcows
|
||||
|
||||
release-build:
|
||||
mkdir -p dist
|
||||
./.dapper release 2>&1 | tee dist/release.log
|
||||
|
||||
itest:
|
||||
mkdir -p dist
|
||||
./.dapper integration-test 2>&1 | tee dist/itest.log
|
||||
grep FAIL dist/itest.log || true
|
||||
|
||||
qcows:
|
||||
cp dist/artifacts/rancheros.iso scripts/images/openstack/
|
||||
cd scripts/images/openstack && \
|
||||
APPEND="console=tty1 console=ttyS0,115200n8 printk.devkmsg=on rancher.autologin=ttyS0" \
|
||||
NAME=openstack ../../../.dapper
|
||||
cd scripts/images/openstack && \
|
||||
APPEND="console=tty1 rancher.debug=true printk.devkmsg=on notsc clocksource=kvm-clock rancher.network.interfaces.eth0.ipv4ll rancher.cloud_init.datasources=[digitalocean] rancher.autologin=tty1 rancher.autologin=ttyS0" \
|
||||
NAME=digitalocean ../../../.dapper
|
||||
cp ./scripts/images/openstack/dist/*.img dist/artifacts/
|
||||
|
||||
rpi:
|
||||
# scripts/images/raspberry-pi-hypriot/dist/rancheros-raspberry-pi.zip
|
||||
cp dist/artifacts/rootfs_arm.tar.gz scripts/images/raspberry-pi-hypriot/
|
||||
cd scripts/images/raspberry-pi-hypriot/ \
|
||||
&& ../../../.dapper
|
||||
|
||||
rpi64:
|
||||
# scripts/images/raspberry-pi-hypriot64/dist/rancheros-raspberry-pi.zip
|
||||
cp dist/artifacts/rootfs_arm64.tar.gz scripts/images/raspberry-pi-hypriot64/
|
||||
cd scripts/images/raspberry-pi-hypriot64/ \
|
||||
&& ../../../.dapper
|
||||
|
||||
help:
|
||||
@./scripts/help
|
||||
|
||||
85
README.md
85
README.md
@@ -14,44 +14,54 @@ it would really be bad if somebody did `docker rm -f $(docker ps -qa)` and delet
|
||||
|
||||
## Latest Release
|
||||
|
||||
**v0.8.0 - Docker 1.12.6 - Linux 4.9.9**
|
||||
**v1.0.3 - Docker 17.03.1-ce - Linux 4.9.34**
|
||||
|
||||
### ISO
|
||||
|
||||
https://releases.rancher.com/os/latest/rancheros.iso
|
||||
https://releases.rancher.com/os/v0.8.0/rancheros.iso
|
||||
- https://releases.rancher.com/os/latest/rancheros.iso
|
||||
- https://releases.rancher.com/os/v1.0.3/rancheros.iso
|
||||
|
||||
### Additional Downloads
|
||||
|
||||
#### Latest Links
|
||||
|
||||
* https://releases.rancher.com/os/latest/initrd-v0.8.0
|
||||
* https://releases.rancher.com/os/latest/initrd
|
||||
* https://releases.rancher.com/os/latest/initrd-v1.0.3
|
||||
* https://releases.rancher.com/os/latest/iso-checksums.txt
|
||||
* https://releases.rancher.com/os/latest/rancheros-openstack.img
|
||||
* https://releases.rancher.com/os/latest/rancheros.ipxe
|
||||
* https://releases.rancher.com/os/latest/rancheros.iso
|
||||
* https://releases.rancher.com/os/latest/rancheros-v0.8.0.tar.gz
|
||||
* https://releases.rancher.com/os/latest/rancheros-v1.0.3.tar.gz
|
||||
* https://releases.rancher.com/os/latest/rootfs.tar.gz
|
||||
* https://releases.rancher.com/os/latest/vmlinuz
|
||||
* https://releases.rancher.com/os/latest/vmlinuz-4.9.34-rancher
|
||||
|
||||
#### v1.0.3 Links
|
||||
|
||||
* https://releases.rancher.com/os/v1.0.3/initrd
|
||||
* https://releases.rancher.com/os/v1.0.3/initrd-v1.0.3
|
||||
* https://releases.rancher.com/os/v1.0.3/iso-checksums.txt
|
||||
* https://releases.rancher.com/os/v1.0.3/rancheros-openstack.img
|
||||
* https://releases.rancher.com/os/v1.0.3/rancheros.ipxe
|
||||
* https://releases.rancher.com/os/v1.0.3/rancheros.iso
|
||||
* https://releases.rancher.com/os/v1.0.3/rancheros-v1.0.3.tar.gz
|
||||
* https://releases.rancher.com/os/v1.0.3/rootfs.tar.gz
|
||||
* https://releases.rancher.com/os/v1.0.3/vmlinuz
|
||||
* https://releases.rancher.com/os/v1.0.3/vmlinuz-4.9.34-rancher
|
||||
|
||||
#### ARM Links
|
||||
|
||||
* https://releases.rancher.com/os/latest/rootfs_arm.tar.gz
|
||||
* https://releases.rancher.com/os/latest/rootfs_arm64.tar.gz
|
||||
* https://releases.rancher.com/os/latest/vmlinuz-4.9.9-rancher
|
||||
* https://releases.rancher.com/os/latest/rancheros-raspberry-pi.zip
|
||||
* https://releases.rancher.com/os/latest/rancheros-raspberry-pi64.zip
|
||||
|
||||
#### v0.8.0 Links
|
||||
* https://releases.rancher.com/os/v1.0.3/rootfs_arm.tar.gz
|
||||
* https://releases.rancher.com/os/v1.0.3/rootfs_arm64.tar.gz
|
||||
* https://releases.rancher.com/os/v1.0.3/rancheros-raspberry-pi.zip
|
||||
* https://releases.rancher.com/os/v1.0.3/rancheros-raspberry-pi64.zip
|
||||
|
||||
* https://releases.rancher.com/os/v0.8.0/initrd-v0.8.0
|
||||
* https://releases.rancher.com/os/v0.8.0/iso-checksums.txt
|
||||
* https://releases.rancher.com/os/v0.8.0/rancheros-openstack.img
|
||||
* https://releases.rancher.com/os/v0.8.0/rancheros.iso
|
||||
* https://releases.rancher.com/os/v0.8.0/rancheros-v0.8.0.tar.gz
|
||||
* https://releases.rancher.com/os/v0.8.0/rootfs.tar.gz
|
||||
* https://releases.rancher.com/os/v0.8.0/rootfs_arm.tar.gz
|
||||
* https://releases.rancher.com/os/v0.8.0/rootfs_arm64.tar.gz
|
||||
* https://releases.rancher.com/os/v0.8.0/vmlinuz-4.9.9-rancher
|
||||
|
||||
#### Raspberry Pi release for v0.8.0 is coming
|
||||
|
||||
* https://releases.rancher.com/os/v0.7.0/rancheros-raspberry-pi.zip
|
||||
|
||||
**Note**: you can use `http` instead of `https` in the above URLs, e.g. for iPXE.
|
||||
**Note**: you can use `http` instead of `https` in the above URLs, e.g. for iPXE.
|
||||
|
||||
### Amazon
|
||||
|
||||
@@ -61,27 +71,28 @@ SSH keys are added to the **`rancher`** user, so you must log in using the **ran
|
||||
|
||||
Region | Type | AMI |
|
||||
-------|------|------
|
||||
ap-south-1 | HVM | [ami-268cfd49](https://ap-south-1.console.aws.amazon.com/ec2/home?region=ap-south-1#launchInstanceWizard:ami=ami-268cfd49)
|
||||
eu-west-2 | HVM | [ami-960316f2](https://eu-west-2.console.aws.amazon.com/ec2/home?region=eu-west-2#launchInstanceWizard:ami=ami-960316f2)
|
||||
eu-west-1 | HVM | [ami-122a0b74](https://eu-west-1.console.aws.amazon.com/ec2/home?region=eu-west-1#launchInstanceWizard:ami=ami-122a0b74)
|
||||
ap-northeast-2 | HVM | [ami-8aa474e4](https://ap-northeast-2.console.aws.amazon.com/ec2/home?region=ap-northeast-2#launchInstanceWizard:ami=ami-8aa474e4)
|
||||
ap-northeast-1 | HVM | [ami-5891dd3f](https://ap-northeast-1.console.aws.amazon.com/ec2/home?region=ap-northeast-1#launchInstanceWizard:ami=ami-5891dd3f)
|
||||
sa-east-1 | HVM | [ami-99a3c4f5](https://sa-east-1.console.aws.amazon.com/ec2/home?region=sa-east-1#launchInstanceWizard:ami=ami-99a3c4f5)
|
||||
ca-central-1 | HVM | [ami-bb902ddf](https://ca-central-1.console.aws.amazon.com/ec2/home?region=ca-central-1#launchInstanceWizard:ami=ami-bb902ddf)
|
||||
ap-southeast-1 | HVM | [ami-b818afdb](https://ap-southeast-1.console.aws.amazon.com/ec2/home?region=ap-southeast-1#launchInstanceWizard:ami=ami-b818afdb)
|
||||
ap-southeast-2 | HVM | [ami-8ba4a5e8](https://ap-southeast-2.console.aws.amazon.com/ec2/home?region=ap-southeast-2#launchInstanceWizard:ami=ami-8ba4a5e8)
|
||||
eu-central-1 | HVM | [ami-67fa3108](https://eu-central-1.console.aws.amazon.com/ec2/home?region=eu-central-1#launchInstanceWizard:ami=ami-67fa3108)
|
||||
us-east-1 | HVM | [ami-4600ce50](https://us-east-1.console.aws.amazon.com/ec2/home?region=us-east-1#launchInstanceWizard:ami=ami-4600ce50)
|
||||
us-east-2 | HVM | [ami-effadf8a](https://us-east-2.console.aws.amazon.com/ec2/home?region=us-east-2#launchInstanceWizard:ami=ami-effadf8a)
|
||||
us-west-1 | HVM | [ami-d3055ab3](https://us-west-1.console.aws.amazon.com/ec2/home?region=us-west-1#launchInstanceWizard:ami=ami-d3055ab3)
|
||||
us-west-2 | HVM | [ami-2e69ee4e](https://us-west-2.console.aws.amazon.com/ec2/home?region=us-west-2#launchInstanceWizard:ami=ami-2e69ee4e)
|
||||
ap-south-1 | HVM | [ami-3576085a](https://ap-south-1.console.aws.amazon.com/ec2/home?region=ap-south-1#launchInstanceWizard:ami=ami-3576085a)
|
||||
eu-west-2 | HVM | [ami-4806102c](https://eu-west-2.console.aws.amazon.com/ec2/home?region=eu-west-2#launchInstanceWizard:ami=ami-4806102c)
|
||||
eu-west-1 | HVM | [ami-64b2a802](https://eu-west-1.console.aws.amazon.com/ec2/home?region=eu-west-1#launchInstanceWizard:ami=ami-64b2a802)
|
||||
ap-northeast-2 | HVM | [ami-9d03dcf3](https://ap-northeast-2.console.aws.amazon.com/ec2/home?region=ap-northeast-2#launchInstanceWizard:ami=ami-9d03dcf3)
|
||||
ap-northeast-1 | HVM | [ami-8bb1a7ec](https://ap-northeast-1.console.aws.amazon.com/ec2/home?region=ap-northeast-1#launchInstanceWizard:ami=ami-8bb1a7ec)
|
||||
sa-east-1 | HVM | [ami-ae1b71c2](https://sa-east-1.console.aws.amazon.com/ec2/home?region=sa-east-1#launchInstanceWizard:ami=ami-ae1b71c2)
|
||||
ca-central-1 | HVM | [ami-4fa7182b](https://ca-central-1.console.aws.amazon.com/ec2/home?region=ca-central-1#launchInstanceWizard:ami=ami-4fa7182b)
|
||||
ap-southeast-1 | HVM | [ami-4f921c2c](https://ap-southeast-1.console.aws.amazon.com/ec2/home?region=ap-southeast-1#launchInstanceWizard:ami=ami-4f921c2c)
|
||||
ap-southeast-2 | HVM | [ami-d64c5fb5](https://ap-southeast-2.console.aws.amazon.com/ec2/home?region=ap-southeast-2#launchInstanceWizard:ami=ami-d64c5fb5)
|
||||
eu-central-1 | HVM | [ami-8c52f4e3](https://eu-central-1.console.aws.amazon.com/ec2/home?region=eu-central-1#launchInstanceWizard:ami=ami-8c52f4e3)
|
||||
us-east-1 | HVM | [ami-067c4a10](https://us-east-1.console.aws.amazon.com/ec2/home?region=us-east-1#launchInstanceWizard:ami=ami-067c4a10)
|
||||
us-east-2 | HVM | [ami-b74b6ad2](https://us-east-2.console.aws.amazon.com/ec2/home?region=us-east-2#launchInstanceWizard:ami=ami-b74b6ad2)
|
||||
us-west-1 | HVM | [ami-04351964](https://us-west-1.console.aws.amazon.com/ec2/home?region=us-west-1#launchInstanceWizard:ami=ami-04351964)
|
||||
us-west-2 | HVM | [ami-bed0c7c7](https://us-west-2.console.aws.amazon.com/ec2/home?region=us-west-2#launchInstanceWizard:ami=ami-bed0c7c7)
|
||||
|
||||
Additionally, images are available with support for Amazon EC2 Container Service (ECS) [here](https://docs.rancher.com/os/amazon-ecs/#amazon-ecs-enabled-amis).
|
||||
|
||||
### Google Compute Engine
|
||||
|
||||
We are providing a disk image that users can download and import for use in Google Compute Engine. The image can be obtained from the release artifacts for RancherOS.
|
||||
|
||||
[Download Image](https://github.com/rancher/os/releases/download/v0.8.0/rancheros-v0.8.0.tar.gz)
|
||||
[Download Image](https://github.com/rancher/os/releases/download/v1.0.0/rancheros-v1.0.0.tar.gz)
|
||||
|
||||
Please follow the directions at our [docs to launch in GCE](http://docs.rancher.com/os/running-rancheros/cloud/gce/).
|
||||
|
||||
|
||||
@@ -9,8 +9,8 @@ import (
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/coreos/coreos-cloudinit/system"
|
||||
rancherConfig "github.com/rancher/os/config"
|
||||
"github.com/rancher/os/config/cloudinit/system"
|
||||
"github.com/rancher/os/docker"
|
||||
"github.com/rancher/os/log"
|
||||
"github.com/rancher/os/util"
|
||||
@@ -71,6 +71,25 @@ func ApplyConsole(cfg *rancherConfig.CloudConfig) {
|
||||
if len(mount) != 4 {
|
||||
log.Errorf("Unable to mount %s: must specify exactly four arguments", mount[1])
|
||||
}
|
||||
|
||||
if mount[2] == "nfs" || mount[2] == "nfs4" {
|
||||
if err := os.MkdirAll(mount[1], 0755); err != nil {
|
||||
log.Errorf("Unable to create mount point %s: %v", mount[1], err)
|
||||
continue
|
||||
}
|
||||
cmdArgs := []string{mount[0], mount[1], "-t", mount[2]}
|
||||
if mount[3] != "" {
|
||||
cmdArgs = append(cmdArgs, "-o", mount[3])
|
||||
}
|
||||
cmd := exec.Command("mount", cmdArgs...)
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
log.Errorf("Failed to mount %s: %v", mount[1], err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
device := util.ResolveDevice(mount[0])
|
||||
|
||||
if mount[2] == "swap" {
|
||||
@@ -84,17 +103,7 @@ func ApplyConsole(cfg *rancherConfig.CloudConfig) {
|
||||
continue
|
||||
}
|
||||
|
||||
cmdArgs := []string{device, mount[1]}
|
||||
if mount[2] != "" {
|
||||
cmdArgs = append(cmdArgs, "-t", mount[2])
|
||||
}
|
||||
if mount[3] != "" {
|
||||
cmdArgs = append(cmdArgs, "-o", mount[3])
|
||||
}
|
||||
cmd := exec.Command("mount", cmdArgs...)
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
if err := util.Mount(device, mount[1], mount[2], mount[3]); err != nil {
|
||||
log.Errorf("Failed to mount %s: %v", mount[1], err)
|
||||
}
|
||||
}
|
||||
@@ -125,11 +134,15 @@ func WriteFiles(cfg *rancherConfig.CloudConfig, container string) {
|
||||
}
|
||||
|
||||
func applyPreConsole(cfg *rancherConfig.CloudConfig) {
|
||||
if _, err := os.Stat(resizeStamp); os.IsNotExist(err) && cfg.Rancher.ResizeDevice != "" {
|
||||
if err := resizeDevice(cfg); err == nil {
|
||||
os.Create(resizeStamp)
|
||||
if cfg.Rancher.ResizeDevice != "" {
|
||||
if _, err := os.Stat(resizeStamp); os.IsNotExist(err) {
|
||||
if err := resizeDevice(cfg); err == nil {
|
||||
os.Create(resizeStamp)
|
||||
} else {
|
||||
log.Errorf("Failed to resize %s: %s", cfg.Rancher.ResizeDevice, err)
|
||||
}
|
||||
} else {
|
||||
log.Errorf("Failed to resize %s: %s", cfg.Rancher.ResizeDevice, err)
|
||||
log.Infof("Skipped resizing %s because %s exists", cfg.Rancher.ResizeDevice, resizeStamp)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
281
cmd/cloudinitsave/cloudinitsave.go
Normal file → Executable file
281
cmd/cloudinitsave/cloudinitsave.go
Normal file → Executable file
@@ -16,30 +16,31 @@
|
||||
package cloudinitsave
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
yaml "github.com/cloudfoundry-incubator/candiedyaml"
|
||||
|
||||
"github.com/coreos/coreos-cloudinit/config"
|
||||
"github.com/coreos/coreos-cloudinit/datasource"
|
||||
"github.com/coreos/coreos-cloudinit/datasource/configdrive"
|
||||
"github.com/coreos/coreos-cloudinit/datasource/file"
|
||||
"github.com/coreos/coreos-cloudinit/datasource/metadata/digitalocean"
|
||||
"github.com/coreos/coreos-cloudinit/datasource/metadata/ec2"
|
||||
"github.com/coreos/coreos-cloudinit/datasource/metadata/packet"
|
||||
"github.com/coreos/coreos-cloudinit/datasource/proc_cmdline"
|
||||
"github.com/coreos/coreos-cloudinit/datasource/url"
|
||||
"github.com/coreos/coreos-cloudinit/pkg"
|
||||
"github.com/docker/docker/pkg/mount"
|
||||
"github.com/rancher/os/cmd/cloudinitsave/gce"
|
||||
"github.com/rancher/os/cmd/control"
|
||||
"github.com/rancher/os/cmd/network"
|
||||
rancherConfig "github.com/rancher/os/config"
|
||||
"github.com/rancher/os/config/cloudinit/config"
|
||||
"github.com/rancher/os/config/cloudinit/datasource"
|
||||
"github.com/rancher/os/config/cloudinit/datasource/configdrive"
|
||||
"github.com/rancher/os/config/cloudinit/datasource/file"
|
||||
"github.com/rancher/os/config/cloudinit/datasource/metadata/digitalocean"
|
||||
"github.com/rancher/os/config/cloudinit/datasource/metadata/ec2"
|
||||
"github.com/rancher/os/config/cloudinit/datasource/metadata/gce"
|
||||
"github.com/rancher/os/config/cloudinit/datasource/metadata/packet"
|
||||
"github.com/rancher/os/config/cloudinit/datasource/proccmdline"
|
||||
"github.com/rancher/os/config/cloudinit/datasource/url"
|
||||
"github.com/rancher/os/config/cloudinit/datasource/vmware"
|
||||
"github.com/rancher/os/config/cloudinit/pkg"
|
||||
"github.com/rancher/os/log"
|
||||
"github.com/rancher/os/netconf"
|
||||
"github.com/rancher/os/util"
|
||||
@@ -49,9 +50,6 @@ const (
|
||||
datasourceInterval = 100 * time.Millisecond
|
||||
datasourceMaxInterval = 30 * time.Second
|
||||
datasourceTimeout = 5 * time.Minute
|
||||
configDevName = "config-2"
|
||||
configDev = "LABEL=" + configDevName
|
||||
configDevMountPoint = "/media/config-2"
|
||||
)
|
||||
|
||||
func Main() {
|
||||
@@ -62,72 +60,39 @@ func Main() {
|
||||
log.Errorf("Failed to run udev settle: %v", err)
|
||||
}
|
||||
|
||||
cfg := rancherConfig.LoadConfig()
|
||||
network.ApplyNetworkConfig(cfg)
|
||||
|
||||
if err := SaveCloudConfig(true); err != nil {
|
||||
if err := saveCloudConfig(); err != nil {
|
||||
log.Errorf("Failed to save cloud-config: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func MountConfigDrive() error {
|
||||
if err := os.MkdirAll(configDevMountPoint, 644); err != nil {
|
||||
return err
|
||||
func saveCloudConfig() error {
|
||||
log.Infof("SaveCloudConfig")
|
||||
|
||||
cfg := rancherConfig.LoadConfig()
|
||||
log.Debugf("init: SaveCloudConfig(pre ApplyNetworkConfig): %#v", cfg.Rancher.Network)
|
||||
network.ApplyNetworkConfig(cfg)
|
||||
|
||||
log.Infof("datasources that will be consided: %#v", cfg.Rancher.CloudInit.Datasources)
|
||||
dss := getDatasources(cfg.Rancher.CloudInit.Datasources)
|
||||
if len(dss) == 0 {
|
||||
log.Errorf("currentDatasource - none found")
|
||||
return nil
|
||||
}
|
||||
|
||||
configDev := util.ResolveDevice(configDev)
|
||||
foundDs := selectDatasource(dss)
|
||||
log.Infof("Cloud-init datasource that was used: %s", foundDs)
|
||||
|
||||
if configDev == "" {
|
||||
return mount.Mount(configDevName, configDevMountPoint, "9p", "trans=virtio,version=9p2000.L")
|
||||
}
|
||||
// Apply any newly detected network config.
|
||||
cfg = rancherConfig.LoadConfig()
|
||||
log.Debugf("init: SaveCloudConfig(post ApplyNetworkConfig): %#v", cfg.Rancher.Network)
|
||||
network.ApplyNetworkConfig(cfg)
|
||||
|
||||
fsType, err := util.GetFsType(configDev)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return mount.Mount(configDev, configDevMountPoint, fsType, "ro")
|
||||
}
|
||||
|
||||
func UnmountConfigDrive() error {
|
||||
return syscall.Unmount(configDevMountPoint, 0)
|
||||
}
|
||||
|
||||
func SaveCloudConfig(network bool) error {
|
||||
userDataBytes, metadata, err := fetchUserData(network)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
userData := string(userDataBytes)
|
||||
scriptBytes := []byte{}
|
||||
|
||||
if config.IsScript(userData) {
|
||||
scriptBytes = userDataBytes
|
||||
userDataBytes = []byte{}
|
||||
} else if isCompose(userData) {
|
||||
if userDataBytes, err = composeToCloudConfig(userDataBytes); err != nil {
|
||||
log.Errorf("Failed to convert compose to cloud-config syntax: %v", err)
|
||||
return err
|
||||
}
|
||||
} else if config.IsCloudConfig(userData) {
|
||||
if _, err := rancherConfig.ReadConfig(userDataBytes, false); err != nil {
|
||||
log.WithFields(log.Fields{"cloud-config": userData, "err": err}).Warn("Failed to parse cloud-config, not saving.")
|
||||
userDataBytes = []byte{}
|
||||
}
|
||||
} else {
|
||||
log.Errorf("Unrecognized user-data\n%s", userData)
|
||||
userDataBytes = []byte{}
|
||||
}
|
||||
|
||||
if _, err := rancherConfig.ReadConfig(userDataBytes, false); err != nil {
|
||||
log.WithFields(log.Fields{"cloud-config": userData, "err": err}).Warn("Failed to parse cloud-config")
|
||||
return errors.New("Failed to parse cloud-config")
|
||||
}
|
||||
|
||||
return saveFiles(userDataBytes, scriptBytes, metadata)
|
||||
return nil
|
||||
}
|
||||
|
||||
func RequiresNetwork(datasource string) bool {
|
||||
// TODO: move into the datasources (and metadatasources)
|
||||
// and then we can enable that platforms defaults..
|
||||
parts := strings.SplitN(datasource, ":", 2)
|
||||
requiresNetwork, ok := map[string]bool{
|
||||
"ec2": true,
|
||||
@@ -157,7 +122,7 @@ func saveFiles(cloudConfigBytes, scriptBytes []byte, metadata datasource.Metadat
|
||||
if err := util.WriteFileAtomic(rancherConfig.CloudConfigBootFile, cloudConfigBytes, 400); err != nil {
|
||||
return err
|
||||
}
|
||||
log.Infof("Written to %s:\n%s", rancherConfig.CloudConfigBootFile, string(cloudConfigBytes))
|
||||
log.Infof("Wrote to %s", rancherConfig.CloudConfigBootFile)
|
||||
}
|
||||
|
||||
metaDataBytes, err := yaml.Marshal(metadata)
|
||||
@@ -168,101 +133,137 @@ func saveFiles(cloudConfigBytes, scriptBytes []byte, metadata datasource.Metadat
|
||||
if err = util.WriteFileAtomic(rancherConfig.MetaDataFile, metaDataBytes, 400); err != nil {
|
||||
return err
|
||||
}
|
||||
log.Infof("Written to %s:\n%s", rancherConfig.MetaDataFile, string(metaDataBytes))
|
||||
log.Infof("Wrote to %s", rancherConfig.MetaDataFile)
|
||||
|
||||
// if we write the empty meta yml, the merge fails.
|
||||
// TODO: the problem is that a partially filled one will still have merge issues, so that needs fixing - presumably by making merge more clever, and making more fields optional
|
||||
emptyMeta, err := yaml.Marshal(datasource.Metadata{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if bytes.Compare(metaDataBytes, emptyMeta) == 0 {
|
||||
log.Infof("not writing %s: its all defaults.", rancherConfig.CloudConfigNetworkFile)
|
||||
return nil
|
||||
}
|
||||
|
||||
type nonRancherCfg struct {
|
||||
Network netconf.NetworkConfig `yaml:"network,omitempty"`
|
||||
}
|
||||
type nonCfg struct {
|
||||
Rancher nonRancherCfg `yaml:"rancher,omitempty"`
|
||||
}
|
||||
// write the network.yml file from metadata
|
||||
cc := nonCfg{
|
||||
Rancher: nonRancherCfg{
|
||||
Network: metadata.NetworkConfig,
|
||||
},
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(path.Dir(rancherConfig.CloudConfigNetworkFile), 0700); err != nil {
|
||||
log.Errorf("Failed to create directory for file %s: %v", rancherConfig.CloudConfigNetworkFile, err)
|
||||
}
|
||||
|
||||
if err := rancherConfig.WriteToFile(cc, rancherConfig.CloudConfigNetworkFile); err != nil {
|
||||
log.Errorf("Failed to save config file %s: %v", rancherConfig.CloudConfigNetworkFile, err)
|
||||
}
|
||||
log.Infof("Wrote to %s", rancherConfig.CloudConfigNetworkFile)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func currentDatasource(network bool) (datasource.Datasource, error) {
|
||||
cfg := rancherConfig.LoadConfig()
|
||||
|
||||
dss := getDatasources(cfg, network)
|
||||
if len(dss) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
ds := selectDatasource(dss)
|
||||
return ds, nil
|
||||
}
|
||||
|
||||
func fetchUserData(network bool) ([]byte, datasource.Metadata, error) {
|
||||
func fetchAndSave(ds datasource.Datasource) error {
|
||||
var metadata datasource.Metadata
|
||||
ds, err := currentDatasource(network)
|
||||
if err != nil || ds == nil {
|
||||
log.Errorf("Failed to select datasource: %v", err)
|
||||
return nil, metadata, err
|
||||
}
|
||||
log.Infof("Fetching user-data from datasource %v", ds.Type())
|
||||
|
||||
log.Infof("Fetching user-data from datasource %s", ds)
|
||||
userDataBytes, err := ds.FetchUserdata()
|
||||
if err != nil {
|
||||
log.Errorf("Failed fetching user-data from datasource: %v", err)
|
||||
return nil, metadata, err
|
||||
return err
|
||||
}
|
||||
log.Infof("Fetching meta-data from datasource of type %v", ds.Type())
|
||||
metadata, err = ds.FetchMetadata()
|
||||
if err != nil {
|
||||
log.Errorf("Failed fetching meta-data from datasource: %v", err)
|
||||
return nil, metadata, err
|
||||
return err
|
||||
}
|
||||
return userDataBytes, metadata, nil
|
||||
|
||||
userData := string(userDataBytes)
|
||||
scriptBytes := []byte{}
|
||||
|
||||
if config.IsScript(userData) {
|
||||
scriptBytes = userDataBytes
|
||||
userDataBytes = []byte{}
|
||||
} else if isCompose(userData) {
|
||||
if userDataBytes, err = composeToCloudConfig(userDataBytes); err != nil {
|
||||
log.Errorf("Failed to convert compose to cloud-config syntax: %v", err)
|
||||
return err
|
||||
}
|
||||
} else if config.IsCloudConfig(userData) {
|
||||
if _, err := rancherConfig.ReadConfig(userDataBytes, false); err != nil {
|
||||
log.WithFields(log.Fields{"cloud-config": userData, "err": err}).Warn("Failed to parse cloud-config, not saving.")
|
||||
userDataBytes = []byte{}
|
||||
}
|
||||
} else {
|
||||
log.Errorf("Unrecognized user-data\n(%s)", userData)
|
||||
userDataBytes = []byte{}
|
||||
}
|
||||
|
||||
if _, err := rancherConfig.ReadConfig(userDataBytes, false); err != nil {
|
||||
log.WithFields(log.Fields{"cloud-config": userData, "err": err}).Warn("Failed to parse cloud-config")
|
||||
return errors.New("Failed to parse cloud-config")
|
||||
}
|
||||
|
||||
return saveFiles(userDataBytes, scriptBytes, metadata)
|
||||
}
|
||||
|
||||
// getDatasources creates a slice of possible Datasources for cloudinit based
|
||||
// on the different source command-line flags.
|
||||
func getDatasources(cfg *rancherConfig.CloudConfig, network bool) []datasource.Datasource {
|
||||
func getDatasources(datasources []string) []datasource.Datasource {
|
||||
dss := make([]datasource.Datasource, 0, 5)
|
||||
|
||||
for _, ds := range cfg.Rancher.CloudInit.Datasources {
|
||||
for _, ds := range datasources {
|
||||
parts := strings.SplitN(ds, ":", 2)
|
||||
|
||||
root := ""
|
||||
if len(parts) > 1 {
|
||||
root = parts[1]
|
||||
}
|
||||
|
||||
switch parts[0] {
|
||||
case "*":
|
||||
dss = append(dss, getDatasources([]string{"configdrive", "vmware", "ec2", "digitalocean", "packet", "gce"})...)
|
||||
case "ec2":
|
||||
if network {
|
||||
if len(parts) == 1 {
|
||||
dss = append(dss, ec2.NewDatasource(ec2.DefaultAddress))
|
||||
} else {
|
||||
dss = append(dss, ec2.NewDatasource(parts[1]))
|
||||
}
|
||||
}
|
||||
dss = append(dss, ec2.NewDatasource(root))
|
||||
case "file":
|
||||
if len(parts) == 2 {
|
||||
dss = append(dss, file.NewDatasource(parts[1]))
|
||||
if root != "" {
|
||||
dss = append(dss, file.NewDatasource(root))
|
||||
}
|
||||
case "url":
|
||||
if network {
|
||||
if len(parts) == 2 {
|
||||
dss = append(dss, url.NewDatasource(parts[1]))
|
||||
}
|
||||
if root != "" {
|
||||
dss = append(dss, url.NewDatasource(root))
|
||||
}
|
||||
case "cmdline":
|
||||
if network {
|
||||
if len(parts) == 1 {
|
||||
dss = append(dss, proc_cmdline.NewDatasource())
|
||||
}
|
||||
if len(parts) == 1 {
|
||||
dss = append(dss, proccmdline.NewDatasource())
|
||||
}
|
||||
case "configdrive":
|
||||
if len(parts) == 2 {
|
||||
dss = append(dss, configdrive.NewDatasource(parts[1]))
|
||||
if root == "" {
|
||||
root = "/media/config-2"
|
||||
}
|
||||
dss = append(dss, configdrive.NewDatasource(root))
|
||||
case "digitalocean":
|
||||
if network {
|
||||
if len(parts) == 1 {
|
||||
dss = append(dss, digitalocean.NewDatasource(digitalocean.DefaultAddress))
|
||||
} else {
|
||||
dss = append(dss, digitalocean.NewDatasource(parts[1]))
|
||||
}
|
||||
} else {
|
||||
enableDoLinkLocal()
|
||||
}
|
||||
// TODO: should we enableDoLinkLocal() - to avoid the need for the other kernel/oem options?
|
||||
dss = append(dss, digitalocean.NewDatasource(root))
|
||||
case "gce":
|
||||
if network {
|
||||
dss = append(dss, gce.NewDatasource("http://metadata.google.internal/"))
|
||||
}
|
||||
dss = append(dss, gce.NewDatasource(root))
|
||||
case "packet":
|
||||
if !network {
|
||||
enablePacketNetwork(&cfg.Rancher)
|
||||
dss = append(dss, packet.NewDatasource(root))
|
||||
case "vmware":
|
||||
// made vmware datasource dependent on detecting vmware independently, as it crashes things otherwise
|
||||
v := vmware.NewDatasource(root)
|
||||
if v != nil {
|
||||
dss = append(dss, v)
|
||||
}
|
||||
dss = append(dss, packet.NewDatasource("https://metadata.packet.net/"))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -270,13 +271,13 @@ func getDatasources(cfg *rancherConfig.CloudConfig, network bool) []datasource.D
|
||||
}
|
||||
|
||||
func enableDoLinkLocal() {
|
||||
err := netconf.ApplyNetworkConfigs(&rancherConfig.NetworkConfig{
|
||||
Interfaces: map[string]rancherConfig.InterfaceConfig{
|
||||
err := netconf.ApplyNetworkConfigs(&netconf.NetworkConfig{
|
||||
Interfaces: map[string]netconf.InterfaceConfig{
|
||||
"eth0": {
|
||||
IPV4LL: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
}, false, false)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to apply link local on eth0: %v", err)
|
||||
}
|
||||
@@ -299,13 +300,17 @@ func selectDatasource(sources []datasource.Datasource) datasource.Datasource {
|
||||
|
||||
duration := datasourceInterval
|
||||
for {
|
||||
log.Infof("Checking availability of %q\n", s.Type())
|
||||
log.Infof("cloud-init: Checking availability of %q", s.Type())
|
||||
if s.IsAvailable() {
|
||||
log.Infof("cloud-init: Datasource available: %s", s)
|
||||
ds <- s
|
||||
return
|
||||
} else if !s.AvailabilityChanges() {
|
||||
}
|
||||
if !s.AvailabilityChanges() {
|
||||
log.Infof("cloud-init: Datasource unavailable, skipping: %s", s)
|
||||
return
|
||||
}
|
||||
log.Errorf("cloud-init: Datasource not ready, will retry: %s", s)
|
||||
select {
|
||||
case <-stop:
|
||||
return
|
||||
@@ -325,6 +330,10 @@ func selectDatasource(sources []datasource.Datasource) datasource.Datasource {
|
||||
var s datasource.Datasource
|
||||
select {
|
||||
case s = <-ds:
|
||||
err := fetchAndSave(s)
|
||||
if err != nil {
|
||||
log.Errorf("Error fetching cloud-init datasource(%s): %s", s, err)
|
||||
}
|
||||
case <-done:
|
||||
case <-time.After(datasourceTimeout):
|
||||
}
|
||||
|
||||
@@ -1,104 +0,0 @@
|
||||
package cloudinitsave
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/rancher/os/log"
|
||||
|
||||
yaml "github.com/cloudfoundry-incubator/candiedyaml"
|
||||
|
||||
"github.com/packethost/packngo/metadata"
|
||||
"github.com/rancher/os/config"
|
||||
"github.com/rancher/os/netconf"
|
||||
)
|
||||
|
||||
func enablePacketNetwork(cfg *config.RancherConfig) {
|
||||
bootStrapped := false
|
||||
for _, v := range cfg.Network.Interfaces {
|
||||
if v.Address != "" {
|
||||
if err := netconf.ApplyNetworkConfigs(&cfg.Network); err != nil {
|
||||
log.Errorf("Failed to bootstrap network: %v", err)
|
||||
return
|
||||
}
|
||||
bootStrapped = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !bootStrapped {
|
||||
return
|
||||
}
|
||||
|
||||
c := metadata.NewClient(http.DefaultClient)
|
||||
m, err := c.Metadata.Get()
|
||||
if err != nil {
|
||||
log.Errorf("Failed to get Packet metadata: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
bondCfg := config.InterfaceConfig{
|
||||
Addresses: []string{},
|
||||
BondOpts: map[string]string{
|
||||
"lacp_rate": "1",
|
||||
"xmit_hash_policy": "layer3+4",
|
||||
"downdelay": "200",
|
||||
"updelay": "200",
|
||||
"miimon": "100",
|
||||
"mode": "4",
|
||||
},
|
||||
}
|
||||
netCfg := config.NetworkConfig{
|
||||
Interfaces: map[string]config.InterfaceConfig{},
|
||||
}
|
||||
for _, iface := range m.Network.Interfaces {
|
||||
netCfg.Interfaces["mac="+iface.Mac] = config.InterfaceConfig{
|
||||
Bond: "bond0",
|
||||
}
|
||||
}
|
||||
for _, addr := range m.Network.Addresses {
|
||||
bondCfg.Addresses = append(bondCfg.Addresses, fmt.Sprintf("%s/%d", addr.Address, addr.Cidr))
|
||||
if addr.Gateway != "" {
|
||||
if addr.AddressFamily == 4 {
|
||||
if addr.Public {
|
||||
bondCfg.Gateway = addr.Gateway
|
||||
}
|
||||
} else {
|
||||
bondCfg.GatewayIpv6 = addr.Gateway
|
||||
}
|
||||
}
|
||||
|
||||
if addr.AddressFamily == 4 && strings.HasPrefix(addr.Gateway, "10.") {
|
||||
bondCfg.PostUp = append(bondCfg.PostUp, "ip route add 10.0.0.0/8 via "+addr.Gateway)
|
||||
}
|
||||
}
|
||||
|
||||
netCfg.Interfaces["bond0"] = bondCfg
|
||||
b, _ := yaml.Marshal(netCfg)
|
||||
log.Debugf("Generated network config: %s", string(b))
|
||||
|
||||
cc := config.CloudConfig{
|
||||
Rancher: config.RancherConfig{
|
||||
Network: netCfg,
|
||||
},
|
||||
}
|
||||
|
||||
// Post to phone home URL on first boot
|
||||
if _, err = os.Stat(config.CloudConfigNetworkFile); err != nil {
|
||||
if _, err = http.Post(m.PhoneHomeURL, "application/json", bytes.NewReader([]byte{})); err != nil {
|
||||
log.Errorf("Failed to post to Packet phone home URL: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(path.Dir(config.CloudConfigNetworkFile), 0700); err != nil {
|
||||
log.Errorf("Failed to create directory for file %s: %v", config.CloudConfigNetworkFile, err)
|
||||
}
|
||||
|
||||
if err := config.WriteToFile(cc, config.CloudConfigNetworkFile); err != nil {
|
||||
log.Errorf("Failed to save config file %s: %v", config.CloudConfigNetworkFile, err)
|
||||
}
|
||||
}
|
||||
103
cmd/control/autologin.go
Normal file
103
cmd/control/autologin.go
Normal file
@@ -0,0 +1,103 @@
|
||||
package control
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/codegangsta/cli"
|
||||
"github.com/rancher/os/config"
|
||||
"github.com/rancher/os/log"
|
||||
)
|
||||
|
||||
func AutologinMain() {
|
||||
log.InitLogger()
|
||||
app := cli.NewApp()
|
||||
|
||||
app.Name = os.Args[0]
|
||||
app.Usage = "autologin console"
|
||||
app.Version = config.Version
|
||||
app.Author = "Rancher Labs, Inc."
|
||||
app.Email = "sven@rancher.com"
|
||||
app.EnableBashCompletion = true
|
||||
app.Action = autologinAction
|
||||
app.HideHelp = true
|
||||
app.Run(os.Args)
|
||||
}
|
||||
|
||||
func autologinAction(c *cli.Context) error {
|
||||
cmd := exec.Command("/bin/stty", "sane")
|
||||
cmd.Stderr = os.Stderr
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stdin = os.Stdin
|
||||
if err := cmd.Run(); err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
usertty := ""
|
||||
user := "root"
|
||||
tty := ""
|
||||
if c.NArg() > 0 {
|
||||
usertty = c.Args().Get(0)
|
||||
s := strings.SplitN(usertty, ":", 2)
|
||||
user = s[0]
|
||||
if len(s) > 1 {
|
||||
tty = s[1]
|
||||
}
|
||||
}
|
||||
|
||||
mode := filepath.Base(os.Args[0])
|
||||
console := CurrentConsole()
|
||||
|
||||
cfg := config.LoadConfig()
|
||||
// replace \n and \l
|
||||
banner := config.Banner
|
||||
banner = strings.Replace(banner, "\\v", config.Version, -1)
|
||||
banner = strings.Replace(banner, "\\s", "RancherOS "+runtime.GOARCH, -1)
|
||||
banner = strings.Replace(banner, "\\r", config.GetKernelVersion(), -1)
|
||||
banner = strings.Replace(banner, "\\n", cfg.Hostname, -1)
|
||||
banner = strings.Replace(banner, "\\l", tty, -1)
|
||||
banner = strings.Replace(banner, "\\\\", "\\", -1)
|
||||
banner = banner + "\n"
|
||||
banner = banner + "Autologin " + console + "\n"
|
||||
fmt.Printf(banner)
|
||||
|
||||
loginBin := ""
|
||||
args := []string{}
|
||||
if console == "centos" || console == "fedora" ||
|
||||
mode == "recovery" {
|
||||
// For some reason, centos and fedora ttyS0 and tty1 don't work with `login -f rancher`
|
||||
// until I make time to read their source, lets just give us a way to get work done
|
||||
loginBin = "bash"
|
||||
args = append(args, "--login")
|
||||
os.Setenv("PROMPT_COMMAND", `echo "[`+fmt.Sprintf("Recovery console %s@%s:${PWD}", user, cfg.Hostname)+`]"`)
|
||||
} else {
|
||||
loginBin = "login"
|
||||
args = append(args, "-f", user)
|
||||
// TODO: add a PROMPT_COMMAND if we haven't switch-rooted
|
||||
}
|
||||
|
||||
loginBinPath, err := exec.LookPath(loginBin)
|
||||
if err != nil {
|
||||
fmt.Printf("error finding %s in path: %s", cmd.Args[0], err)
|
||||
return err
|
||||
}
|
||||
os.Setenv("TERM", "linux")
|
||||
|
||||
// Causes all sorts of issues
|
||||
//return syscall.Exec(loginBinPath, args, os.Environ())
|
||||
cmd = exec.Command(loginBinPath, args...)
|
||||
cmd.Env = os.Environ()
|
||||
cmd.Env = append(cmd.Env, "SVEN", "MORE")
|
||||
|
||||
cmd.Stderr = os.Stderr
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stdin = os.Stdin
|
||||
if err := cmd.Run(); err != nil {
|
||||
log.Errorf("\nError starting %s: %s", cmd.Args[0], err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -45,10 +45,9 @@ func bootstrapAction(c *cli.Context) error {
|
||||
waitForRoot(cfg)
|
||||
}
|
||||
|
||||
autoformatDevices := cfg.Rancher.State.Autoformat
|
||||
log.Debugf("bootstrapAction: Autoformat(%v)", cfg.Rancher.State.Autoformat)
|
||||
if len(autoformatDevices) > 0 {
|
||||
if err := autoformat(autoformatDevices); err != nil {
|
||||
if len(cfg.Rancher.State.Autoformat) > 0 {
|
||||
log.Infof("bootstrap container: Autoformat(%v) as %s", cfg.Rancher.State.Autoformat, "ext4")
|
||||
if err := autoformat(cfg.Rancher.State.Autoformat); err != nil {
|
||||
log.Errorf("Failed to run autoformat: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package control
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/codegangsta/cli"
|
||||
@@ -14,7 +15,7 @@ func Main() {
|
||||
app := cli.NewApp()
|
||||
|
||||
app.Name = os.Args[0]
|
||||
app.Usage = "Control and configure RancherOS"
|
||||
app.Usage = fmt.Sprintf("Control and configure RancherOS\nbuilt: %s", config.BuildDate)
|
||||
app.Version = config.Version
|
||||
app.Author = "Rancher Labs, Inc."
|
||||
app.EnableBashCompletion = true
|
||||
@@ -101,6 +102,13 @@ func Main() {
|
||||
SkipFlagParsing: true,
|
||||
Action: preloadImagesAction,
|
||||
},
|
||||
{
|
||||
Name: "recovery-init",
|
||||
Hidden: true,
|
||||
HideHelp: true,
|
||||
SkipFlagParsing: true,
|
||||
Action: recoveryInitAction,
|
||||
},
|
||||
{
|
||||
Name: "switch-console",
|
||||
Hidden: true,
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"sort"
|
||||
"strings"
|
||||
"text/template"
|
||||
@@ -76,6 +77,11 @@ func configSubcommands() []cli.Command {
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "syslinux",
|
||||
Usage: "edit Syslinux boot global.cfg",
|
||||
Action: editSyslinux,
|
||||
},
|
||||
{
|
||||
Name: "validate",
|
||||
Usage: "validate configuration from stdin",
|
||||
@@ -146,6 +152,17 @@ func env2map(env []string) map[string]string {
|
||||
return m
|
||||
}
|
||||
|
||||
func editSyslinux(c *cli.Context) error {
|
||||
cmd := exec.Command("system-docker", "run", "--rm", "-it",
|
||||
"-v", "/:/host",
|
||||
"-w", "/host",
|
||||
"--entrypoint=vi",
|
||||
"rancher/os-console:"+config.Version,
|
||||
"boot/global.cfg")
|
||||
cmd.Stdout, cmd.Stderr, cmd.Stdin = os.Stdout, os.Stderr, os.Stdin
|
||||
return cmd.Run()
|
||||
}
|
||||
|
||||
func configSet(c *cli.Context) error {
|
||||
if c.NArg() < 2 {
|
||||
return nil
|
||||
@@ -204,7 +221,15 @@ func merge(c *cli.Context) error {
|
||||
}
|
||||
|
||||
if err = config.Merge(bytes); err != nil {
|
||||
log.Fatal(err)
|
||||
log.Error(err)
|
||||
validationErrors, err := config.ValidateBytes(bytes)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
for _, validationError := range validationErrors.Errors() {
|
||||
log.Error(validationError)
|
||||
}
|
||||
log.Fatal("EXITING: Failed to parse configuration")
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -234,7 +259,7 @@ func validate(c *cli.Context) error {
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
validationErrors, err := config.Validate(bytes)
|
||||
validationErrors, err := config.ValidateBytes(bytes)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
@@ -2,18 +2,19 @@ package control
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"github.com/codegangsta/cli"
|
||||
"github.com/docker/docker/reference"
|
||||
composeConfig "github.com/docker/libcompose/config"
|
||||
"github.com/docker/libcompose/project/options"
|
||||
"github.com/rancher/os/cmd/control/service"
|
||||
"github.com/rancher/os/compose"
|
||||
"github.com/rancher/os/config"
|
||||
"github.com/rancher/os/docker"
|
||||
"github.com/rancher/os/log"
|
||||
"github.com/rancher/os/util"
|
||||
"github.com/rancher/os/util/network"
|
||||
@@ -57,7 +58,7 @@ func consoleSwitch(c *cli.Context) error {
|
||||
|
||||
cfg := config.LoadConfig()
|
||||
validateConsole(newConsole, cfg)
|
||||
if newConsole == currentConsole() {
|
||||
if newConsole == CurrentConsole() {
|
||||
log.Warnf("Console is already set to %s", newConsole)
|
||||
}
|
||||
|
||||
@@ -127,10 +128,10 @@ func consoleEnable(c *cli.Context) error {
|
||||
func consoleList(c *cli.Context) error {
|
||||
cfg := config.LoadConfig()
|
||||
consoles := availableConsoles(cfg)
|
||||
currentConsole := currentConsole()
|
||||
CurrentConsole := CurrentConsole()
|
||||
|
||||
for _, console := range consoles {
|
||||
if console == currentConsole {
|
||||
if console == CurrentConsole {
|
||||
fmt.Printf("current %s\n", console)
|
||||
} else if console == cfg.Rancher.Console {
|
||||
fmt.Printf("enabled %s\n", console)
|
||||
@@ -159,12 +160,32 @@ func availableConsoles(cfg *config.CloudConfig) []string {
|
||||
return consoles
|
||||
}
|
||||
|
||||
func currentConsole() (console string) {
|
||||
consoleBytes, err := ioutil.ReadFile("/run/console-done")
|
||||
if err == nil {
|
||||
console = strings.TrimSpace(string(consoleBytes))
|
||||
} else {
|
||||
// CurrentConsole gets the name of the console that's running
|
||||
func CurrentConsole() (console string) {
|
||||
// TODO: replace this docker container look up with a libcompose service lookup?
|
||||
|
||||
// sudo system-docker inspect --format "{{.Config.Image}}" console
|
||||
client, err := docker.NewSystemClient()
|
||||
if err != nil {
|
||||
log.Warnf("Failed to detect current console: %v", err)
|
||||
return
|
||||
}
|
||||
info, err := client.ContainerInspect(context.Background(), "console")
|
||||
if err != nil {
|
||||
log.Warnf("Failed to detect current console: %v", err)
|
||||
return
|
||||
}
|
||||
// parse image name, then remove os- prefix and the console suffix
|
||||
image, err := reference.ParseNamed(info.Config.Image)
|
||||
if err != nil {
|
||||
log.Warnf("Failed to detect current console(%s): %v", info.Config.Image, err)
|
||||
return
|
||||
}
|
||||
|
||||
if strings.Contains(image.Name(), "os-console") {
|
||||
console = "default"
|
||||
return
|
||||
}
|
||||
console = strings.TrimPrefix(strings.TrimSuffix(image.Name(), "console"), "rancher/os-")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -81,7 +81,7 @@ func consoleInitFunc() error {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
if err := writeRespawn(); err != nil {
|
||||
if err := writeRespawn("rancher", cfg.Rancher.SSH.Daemon, false); err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
@@ -91,6 +91,7 @@ func consoleInitFunc() error {
|
||||
|
||||
for _, link := range []symlink{
|
||||
{"/var/lib/rancher/engine/docker", "/usr/bin/docker"},
|
||||
{"/var/lib/rancher/engine/docker-init", "/usr/bin/docker-init"},
|
||||
{"/var/lib/rancher/engine/docker-containerd", "/usr/bin/docker-containerd"},
|
||||
{"/var/lib/rancher/engine/docker-containerd-ctr", "/usr/bin/docker-containerd-ctr"},
|
||||
{"/var/lib/rancher/engine/docker-containerd-shim", "/usr/bin/docker-containerd-shim"},
|
||||
@@ -107,17 +108,7 @@ func consoleInitFunc() error {
|
||||
}
|
||||
|
||||
// font backslashes need to be escaped for when issue is output! (but not the others..)
|
||||
if err := ioutil.WriteFile("/etc/issue", []byte(`
|
||||
, , ______ _ _____ _____TM
|
||||
,------------|'------'| | ___ \\ | | / _ / ___|
|
||||
/ . '-' |- | |_/ /__ _ _ __ ___| |__ ___ _ __ | | | \\ '--.
|
||||
\\/| | | | // _' | '_ \\ / __| '_ \\ / _ \\ '__' | | | |'--. \\
|
||||
| .________.'----' | |\\ \\ (_| | | | | (__| | | | __/ | | \\_/ /\\__/ /
|
||||
| | | | \\_| \\_\\__,_|_| |_|\\___|_| |_|\\___|_| \\___/\\____/
|
||||
\\___/ \\___/ \s \r
|
||||
|
||||
RancherOS `+config.Version+` \n \l
|
||||
`), 0644); err != nil {
|
||||
if err := ioutil.WriteFile("/etc/issue", []byte(config.Banner), 0644); err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
@@ -135,7 +126,7 @@ func consoleInitFunc() error {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
if err := ioutil.WriteFile(consoleDone, []byte(cfg.Rancher.Console), 0644); err != nil {
|
||||
if err := ioutil.WriteFile(consoleDone, []byte(CurrentConsole()), 0644); err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
@@ -153,17 +144,22 @@ func consoleInitFunc() error {
|
||||
return syscall.Exec(respawnBinPath, []string{"respawn", "-f", "/etc/respawn.conf"}, os.Environ())
|
||||
}
|
||||
|
||||
func generateRespawnConf(cmdline string) string {
|
||||
func generateRespawnConf(cmdline, user string, sshd, recovery bool) string {
|
||||
var respawnConf bytes.Buffer
|
||||
|
||||
autologinBin := "/usr/bin/autologin"
|
||||
if recovery {
|
||||
autologinBin = "/usr/bin/recovery"
|
||||
}
|
||||
|
||||
for i := 1; i < 7; i++ {
|
||||
tty := fmt.Sprintf("tty%d", i)
|
||||
|
||||
respawnConf.WriteString(gettyCmd)
|
||||
if strings.Contains(cmdline, fmt.Sprintf("rancher.autologin=%s", tty)) {
|
||||
respawnConf.WriteString(" --autologin rancher")
|
||||
respawnConf.WriteString(fmt.Sprintf(" -n -l %s -o %s:tty%d", autologinBin, user, i))
|
||||
}
|
||||
respawnConf.WriteString(fmt.Sprintf(" 115200 %s\n", tty))
|
||||
respawnConf.WriteString(fmt.Sprintf(" --noclear %s linux\n", tty))
|
||||
}
|
||||
|
||||
for _, tty := range []string{"ttyS0", "ttyS1", "ttyS2", "ttyS3", "ttyAMA0"} {
|
||||
@@ -173,23 +169,25 @@ func generateRespawnConf(cmdline string) string {
|
||||
|
||||
respawnConf.WriteString(gettyCmd)
|
||||
if strings.Contains(cmdline, fmt.Sprintf("rancher.autologin=%s", tty)) {
|
||||
respawnConf.WriteString(" --autologin rancher")
|
||||
respawnConf.WriteString(fmt.Sprintf(" -n -l %s -o %s:%s", autologinBin, user, tty))
|
||||
}
|
||||
respawnConf.WriteString(fmt.Sprintf(" 115200 %s\n", tty))
|
||||
respawnConf.WriteString(fmt.Sprintf(" %s\n", tty))
|
||||
}
|
||||
|
||||
respawnConf.WriteString("/usr/sbin/sshd -D")
|
||||
if sshd {
|
||||
respawnConf.WriteString("/usr/sbin/sshd -D")
|
||||
}
|
||||
|
||||
return respawnConf.String()
|
||||
}
|
||||
|
||||
func writeRespawn() error {
|
||||
func writeRespawn(user string, sshd, recovery bool) error {
|
||||
cmdline, err := ioutil.ReadFile("/proc/cmdline")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
respawn := generateRespawnConf(string(cmdline))
|
||||
respawn := generateRespawnConf(string(cmdline), user, sshd, recovery)
|
||||
|
||||
files, err := ioutil.ReadDir("/etc/respawn.conf.d")
|
||||
if err == nil {
|
||||
|
||||
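The autologin decision above is driven purely by the kernel command line. A minimal sketch of that check, assuming an example /proc/cmdline value (the cmdline string below is an assumption for illustration):

```go
package main

import (
	"fmt"
	"strings"
)

// autologinRequested is a sketch of the test used in generateRespawnConf: a
// tty gets autologin only if "rancher.autologin=<tty>" appears on the kernel
// command line.
func autologinRequested(cmdline, tty string) bool {
	return strings.Contains(cmdline, fmt.Sprintf("rancher.autologin=%s", tty))
}

func main() {
	cmdline := "printk.devkmsg=on rancher.autologin=tty1 rancher.autologin=ttyS0"
	for _, tty := range []string{"tty1", "tty2", "ttyS0"} {
		fmt.Println(tty, autologinRequested(cmdline, tty)) // tty1 true, tty2 false, ttyS0 true
	}
}
```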
@@ -10,7 +10,6 @@ import (
	"time"

	"github.com/codegangsta/cli"
	"github.com/rancher/os/config"
	"github.com/rancher/os/log"
	"github.com/rancher/os/util"
)
@@ -22,6 +21,7 @@ const (
)

func dockerInitAction(c *cli.Context) error {
	// TODO: this should be replaced by a "Console ready event watcher"
	for {
		if _, err := os.Stat(consoleDone); err == nil {
			break
@@ -29,21 +29,33 @@ func dockerInitAction(c *cli.Context) error {
		time.Sleep(200 * time.Millisecond)
	}

	dockerBin := "/usr/bin/docker"
	for _, binPath := range []string{
	dockerBin := ""
	dockerPaths := []string{
		"/usr/bin",
		"/opt/bin",
		"/usr/local/bin",
		"/var/lib/rancher/docker",
	} {
	}
	for _, binPath := range dockerPaths {
		if util.ExistsAndExecutable(path.Join(binPath, "dockerd")) {
			dockerBin = path.Join(binPath, "dockerd")
			break
		}
		if util.ExistsAndExecutable(path.Join(binPath, "docker")) {
			dockerBin = path.Join(binPath, "docker")
			break
		}
	if dockerBin == "" {
		for _, binPath := range dockerPaths {
			if util.ExistsAndExecutable(path.Join(binPath, "docker")) {
				dockerBin = path.Join(binPath, "docker")
				break
			}
		}
	}
	if dockerBin == "" {
		err := fmt.Errorf("Failed to find either dockerd or docker binaries")
		log.Error(err)
		return err
	}
	log.Infof("Found %s", dockerBin)

	if err := syscall.Mount("", "/", "", syscall.MS_SHARED|syscall.MS_REC, ""); err != nil {
		log.Error(err)
@@ -69,9 +81,8 @@ func dockerInitAction(c *cli.Context) error {
		fmt.Sprintf(`[ -e %s ] && source %s; exec /usr/bin/dockerlaunch %s %s $DOCKER_OPTS >> %s 2>&1`, dockerConf, dockerConf, dockerBin, strings.Join(c.Args(), " "), dockerLog),
	}

	cfg := config.LoadConfig()

	if err := ioutil.WriteFile(dockerDone, []byte(cfg.Rancher.Docker.Engine), 0644); err != nil {
	// TODO: this should be replaced by a "Docker ready event watcher"
	if err := ioutil.WriteFile(dockerDone, []byte(CurrentEngine()), 0644); err != nil {
		log.Error(err)
	}
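The binary lookup above prefers a "dockerd" found in any of the candidate directories and only then falls back to a plain "docker". A hedged sketch of that preference order; util.ExistsAndExecutable is the project's helper, so this sketch substitutes a simple os.Stat-based stand-in:

```go
package main

import (
	"fmt"
	"os"
	"path"
)

// isExecutable is a simplified stand-in for util.ExistsAndExecutable.
func isExecutable(p string) bool {
	info, err := os.Stat(p)
	return err == nil && info.Mode()&0111 != 0
}

// findDockerBinary mirrors the two-pass lookup shown above: prefer "dockerd"
// in any candidate path, then fall back to the older "docker" binary.
func findDockerBinary(paths []string) string {
	for _, dir := range paths {
		if isExecutable(path.Join(dir, "dockerd")) {
			return path.Join(dir, "dockerd")
		}
	}
	for _, dir := range paths {
		if isExecutable(path.Join(dir, "docker")) {
			return path.Join(dir, "docker")
		}
	}
	return ""
}

func main() {
	paths := []string{"/usr/bin", "/opt/bin", "/usr/local/bin", "/var/lib/rancher/docker"}
	fmt.Println(findDockerBinary(paths)) // empty string if neither binary is found
}
```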
@@ -2,17 +2,18 @@ package control

import (
	"fmt"
	"io/ioutil"
	"sort"
	"strings"

	"golang.org/x/net/context"

	"github.com/codegangsta/cli"
	"github.com/docker/docker/reference"
	"github.com/docker/libcompose/project/options"
	"github.com/rancher/os/cmd/control/service"
	"github.com/rancher/os/compose"
	"github.com/rancher/os/config"
	"github.com/rancher/os/docker"
	"github.com/rancher/os/log"
	"github.com/rancher/os/util"
	"github.com/rancher/os/util/network"
@@ -104,7 +105,7 @@ func engineEnable(c *cli.Context) error {
func engineList(c *cli.Context) error {
	cfg := config.LoadConfig()
	engines := availableEngines(cfg)
	currentEngine := currentEngine()
	currentEngine := CurrentEngine()

	for _, engine := range engines {
		if engine == currentEngine {
@@ -135,12 +136,33 @@ func availableEngines(cfg *config.CloudConfig) []string {
	return engines
}

func currentEngine() (engine string) {
	engineBytes, err := ioutil.ReadFile(dockerDone)
	if err == nil {
		engine = strings.TrimSpace(string(engineBytes))
	} else {
		log.Warnf("Failed to detect current Docker engine: %v", err)
// CurrentEngine gets the name of the docker that's running
func CurrentEngine() (engine string) {
	// sudo system-docker inspect --format "{{.Config.Image}}" docker
	client, err := docker.NewSystemClient()
	if err != nil {
		log.Warnf("Failed to detect current docker: %v", err)
		return
	}
	info, err := client.ContainerInspect(context.Background(), "docker")
	if err != nil {
		log.Warnf("Failed to detect current docker: %v", err)
		return
	}
	// parse image name, then remove os- prefix and the engine suffix
	image, err := reference.ParseNamed(info.Config.Image)
	if err != nil {
		log.Warnf("Failed to detect current docker(%s): %v", info.Config.Image, err)
		return
	}
	if t, ok := image.(reference.NamedTagged); ok {
		tag := t.Tag()
		if !strings.HasPrefix(tag, "1.") {
			// TODO: this assumes we only do Docker ce :/
			tag = tag + "-ce"
		}
		return "docker-" + tag
	}

	return
}
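A small sketch of the tag handling in CurrentEngine, showing how an image tag maps to the reported engine name. The sample tags are assumptions for illustration only:

```go
package main

import (
	"fmt"
	"strings"
)

// engineNameFromTag mirrors the tag handling above: tags that do not start
// with "1." are treated as Docker CE releases and get a "-ce" suffix before
// being prefixed with "docker-".
func engineNameFromTag(tag string) string {
	if !strings.HasPrefix(tag, "1.") {
		tag = tag + "-ce"
	}
	return "docker-" + tag
}

func main() {
	fmt.Println(engineNameFromTag("1.12.6"))  // docker-1.12.6
	fmt.Println(engineNameFromTag("17.03.1")) // docker-17.03.1-ce
}
```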
@@ -78,20 +78,9 @@ func writeFiles(cfg *config.CloudConfig) error {
|
||||
}
|
||||
|
||||
func setupCommandSymlinks() {
|
||||
for _, powerOperation := range []string{
|
||||
"/sbin/poweroff",
|
||||
"/sbin/shutdown",
|
||||
"/sbin/reboot",
|
||||
"/sbin/halt",
|
||||
"/usr/sbin/poweroff",
|
||||
"/usr/sbin/shutdown",
|
||||
"/usr/sbin/reboot",
|
||||
"/usr/sbin/halt",
|
||||
} {
|
||||
os.Remove(powerOperation)
|
||||
}
|
||||
|
||||
for _, link := range []symlink{
|
||||
{config.RosBin, "/usr/bin/autologin"},
|
||||
{config.RosBin, "/usr/bin/recovery"},
|
||||
{config.RosBin, "/usr/bin/cloud-init-execute"},
|
||||
{config.RosBin, "/usr/bin/cloud-init-save"},
|
||||
{config.RosBin, "/usr/bin/dockerlaunch"},
|
||||
@@ -99,11 +88,16 @@ func setupCommandSymlinks() {
|
||||
{config.RosBin, "/usr/bin/system-docker"},
|
||||
{config.RosBin, "/usr/sbin/netconf"},
|
||||
{config.RosBin, "/usr/sbin/wait-for-docker"},
|
||||
{config.RosBin, "/usr/sbin/poweroff"},
|
||||
{config.RosBin, "/usr/sbin/reboot"},
|
||||
{config.RosBin, "/usr/sbin/halt"},
|
||||
{config.RosBin, "/usr/sbin/shutdown"},
|
||||
{config.RosBin, "/sbin/poweroff"},
|
||||
{config.RosBin, "/sbin/reboot"},
|
||||
{config.RosBin, "/sbin/halt"},
|
||||
{config.RosBin, "/sbin/shutdown"},
|
||||
} {
|
||||
os.Remove(link.newname)
|
||||
if err := os.Symlink(link.oldname, link.newname); err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
@@ -3,12 +3,14 @@ package control
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"crypto/md5"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
@@ -50,6 +52,14 @@ var installCommand = cli.Command{
|
||||
Name: "device, d",
|
||||
Usage: "storage device",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "partition, p",
|
||||
Usage: "partition to install to",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "statedir",
|
||||
Usage: "install to rancher.state.directory",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "force, f",
|
||||
Usage: "[ DANGEROUS! Data loss can happen ] partition/format without prompting",
|
||||
@@ -73,7 +83,7 @@ var installCommand = cli.Command{
|
||||
Hidden: true,
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "kexec",
|
||||
Name: "kexec, k",
|
||||
Usage: "reboot using kexec",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
@@ -84,11 +94,16 @@ var installCommand = cli.Command{
|
||||
}
|
||||
|
||||
func installAction(c *cli.Context) error {
|
||||
if runtime.GOARCH != "amd64" {
|
||||
log.Fatalf("ros install / upgrade only supported on 'amd64', not '%s'", runtime.GOARCH)
|
||||
}
|
||||
|
||||
if c.Args().Present() {
|
||||
log.Fatalf("invalid arguments %v", c.Args())
|
||||
}
|
||||
|
||||
if c.Bool("debug") {
|
||||
debug := c.Bool("debug")
|
||||
if debug {
|
||||
originalLevel := log.GetLevel()
|
||||
defer log.SetLevel(originalLevel)
|
||||
log.SetLevel(log.DebugLevel)
|
||||
@@ -119,6 +134,11 @@ func installAction(c *cli.Context) error {
|
||||
isoinstallerloaded = true // OMG this flag is aweful - kill it with fire
|
||||
}
|
||||
device := c.String("device")
|
||||
partition := c.String("partition")
|
||||
statedir := c.String("statedir")
|
||||
if statedir != "" && installType != "noformat" {
|
||||
log.Fatal("--statedir %s requires --type noformat", statedir)
|
||||
}
|
||||
if installType != "noformat" &&
|
||||
installType != "raid" &&
|
||||
installType != "bootstrap" &&
|
||||
@@ -133,18 +153,20 @@ func installAction(c *cli.Context) error {
|
||||
if cloudConfig == "" {
|
||||
if installType != "upgrade" {
|
||||
// TODO: I wonder if its plausible to merge a new cloud-config into an existing one on upgrade - so for now, i'm only turning off the warning
|
||||
log.Warn("Cloud-config not provided: you might need to provide cloud-config on bootDir with ssh_authorized_keys")
|
||||
log.Warn("Cloud-config not provided: you might need to provide cloud-config on boot with ssh_authorized_keys")
|
||||
}
|
||||
} else {
|
||||
os.MkdirAll("/opt", 0755)
|
||||
uc := "/opt/user_config.yml"
|
||||
if err := util.FileCopy(cloudConfig, uc); err != nil {
|
||||
log.WithFields(log.Fields{"cloudConfig": cloudConfig}).Fatal("Failed to copy cloud-config")
|
||||
log.WithFields(log.Fields{"cloudConfig": cloudConfig, "error": err}).Fatal("Failed to copy cloud-config")
|
||||
}
|
||||
cloudConfig = uc
|
||||
}
|
||||
|
||||
if err := runInstall(image, installType, cloudConfig, device, kappend, force, kexec, isoinstallerloaded); err != nil {
|
||||
if err := runInstall(image, installType, cloudConfig, device, partition, statedir, kappend, force, kexec, isoinstallerloaded, debug); err != nil {
|
||||
log.WithFields(log.Fields{"err": err}).Fatal("Failed to run install")
|
||||
return err
|
||||
}
|
||||
|
||||
if !kexec && reboot && (force || yes("Continue with reboot")) {
|
||||
@@ -155,7 +177,7 @@ func installAction(c *cli.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func runInstall(image, installType, cloudConfig, device, kappend string, force, kexec, isoinstallerloaded bool) error {
|
||||
func runInstall(image, installType, cloudConfig, device, partition, statedir, kappend string, force, kexec, isoinstallerloaded, debug bool) error {
|
||||
fmt.Printf("Installing from %s\n", image)
|
||||
|
||||
if !force {
|
||||
@@ -176,27 +198,26 @@ func runInstall(image, installType, cloudConfig, device, kappend string, force,
|
||||
log.Infof("user specified to install pre v0.8.0: %s", image)
|
||||
imageVersion = strings.Replace(imageVersion, "-", ".", -1)
|
||||
vArray := strings.Split(imageVersion, ".")
|
||||
v, _ := strconv.ParseFloat(vArray[0]+"."+vArray[1], 32)
|
||||
if v < 0.8 || imageVersion == "0.8.0-rc1" {
|
||||
log.Infof("starting installer container for %s", image)
|
||||
if installType == "generic" ||
|
||||
installType == "syslinux" ||
|
||||
installType == "gptsyslinux" {
|
||||
cmd := exec.Command("system-docker", "run", "--net=host", "--privileged", "--volumes-from=all-volumes",
|
||||
"--entrypoint=/scripts/set-disk-partitions", image, device, diskType)
|
||||
cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
return err
|
||||
if len(vArray) >= 2 {
|
||||
v, _ := strconv.ParseFloat(vArray[0]+"."+vArray[1], 32)
|
||||
if v < 0.8 || imageVersion == "0.8.0-rc1" {
|
||||
log.Infof("starting installer container for %s", image)
|
||||
if installType == "generic" ||
|
||||
installType == "syslinux" ||
|
||||
installType == "gptsyslinux" {
|
||||
cmd := exec.Command("system-docker", "run", "--net=host", "--privileged", "--volumes-from=all-volumes",
|
||||
"--entrypoint=/scripts/set-disk-partitions", image, device, diskType)
|
||||
cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
cmd := exec.Command("system-docker", "run", "--net=host", "--privileged", "--volumes-from=user-volumes",
|
||||
"--volumes-from=command-volumes", image, "-d", device, "-t", installType, "-c", cloudConfig,
|
||||
"-a", kappend)
|
||||
cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
|
||||
return cmd.Run()
|
||||
}
|
||||
cmd := exec.Command("system-docker", "run", "--net=host", "--privileged", "--volumes-from=user-volumes",
|
||||
"--volumes-from=command-volumes", image, "-d", device, "-t", installType, "-c", cloudConfig,
|
||||
"-a", kappend)
|
||||
cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -266,6 +287,15 @@ func runInstall(image, installType, cloudConfig, device, kappend string, force,
|
||||
if kexec {
|
||||
installerCmd = append(installerCmd, "--kexec")
|
||||
}
|
||||
if debug {
|
||||
installerCmd = append(installerCmd, "--debug")
|
||||
}
|
||||
if partition != "" {
|
||||
installerCmd = append(installerCmd, "--partition", partition)
|
||||
}
|
||||
if statedir != "" {
|
||||
installerCmd = append(installerCmd, "--statedir", statedir)
|
||||
}
|
||||
|
||||
// TODO: mount at /mnt for shared mount?
|
||||
if useIso {
|
||||
@@ -275,10 +305,7 @@ func runInstall(image, installType, cloudConfig, device, kappend string, force,
|
||||
cmd := exec.Command("system-docker", installerCmd...)
|
||||
log.Debugf("Run(%v)", cmd)
|
||||
cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
return cmd.Run()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -288,21 +315,26 @@ func runInstall(image, installType, cloudConfig, device, kappend string, force,
|
||||
|
||||
log.Debugf("running installation")
|
||||
|
||||
if installType == "generic" ||
|
||||
installType == "syslinux" ||
|
||||
installType == "gptsyslinux" {
|
||||
diskType := "msdos"
|
||||
if installType == "gptsyslinux" {
|
||||
diskType = "gpt"
|
||||
if partition == "" {
|
||||
if installType == "generic" ||
|
||||
installType == "syslinux" ||
|
||||
installType == "gptsyslinux" {
|
||||
diskType := "msdos"
|
||||
if installType == "gptsyslinux" {
|
||||
diskType = "gpt"
|
||||
}
|
||||
log.Debugf("running setDiskpartitions")
|
||||
err := setDiskpartitions(device, diskType)
|
||||
if err != nil {
|
||||
log.Errorf("error setDiskpartitions %s", err)
|
||||
return err
|
||||
}
|
||||
// use the bind mounted host filesystem to get access to the /dev/vda1 device that udev on the host sets up (TODO: can we run a udevd inside the container? `mknod b 253 1 /dev/vda1` doesn't work)
|
||||
device = "/host" + device
|
||||
//# TODO: Change this to a number so that users can specify.
|
||||
//# Will need to make it so that our builds and packer APIs remain consistent.
|
||||
partition = device + "1" //${partition:=${device}1}
|
||||
}
|
||||
log.Debugf("running setDiskpartitions")
|
||||
err := setDiskpartitions(device, diskType)
|
||||
if err != nil {
|
||||
log.Errorf("error setDiskpartitions %s", err)
|
||||
return err
|
||||
}
|
||||
// use the bind mounted host filesystem to get access to the /dev/vda1 device that udev on the host sets up (TODO: can we run a udevd inside the container? `mknod b 253 1 /dev/vda1` doesn't work)
|
||||
device = "/host" + device
|
||||
}
|
||||
|
||||
if installType == "upgrade" {
|
||||
@@ -314,11 +346,11 @@ func runInstall(image, installType, cloudConfig, device, kappend string, force,
|
||||
// TODO: detect if its not mounted and then optionally mount?
|
||||
if err := mountBootIso(); err != nil {
|
||||
log.Errorf("error mountBootIso %s", err)
|
||||
return err
|
||||
//return err
|
||||
}
|
||||
}
|
||||
|
||||
err := layDownOS(image, installType, cloudConfig, device, kappend, kexec)
|
||||
err := layDownOS(image, installType, cloudConfig, device, partition, statedir, kappend, kexec)
|
||||
if err != nil {
|
||||
log.Errorf("error layDownOS %s", err)
|
||||
return err
|
||||
@@ -330,48 +362,25 @@ func runInstall(image, installType, cloudConfig, device, kappend string, force,
|
||||
func mountBootIso() error {
|
||||
deviceName := "/dev/sr0"
|
||||
deviceType := "iso9660"
|
||||
{ // force the defer
|
||||
mountsFile, err := os.Open("/proc/mounts")
|
||||
if err != nil {
|
||||
log.Errorf("failed to read /proc/mounts %s", err)
|
||||
return err
|
||||
}
|
||||
defer mountsFile.Close()
|
||||
if d, t := util.Blkid("RancherOS"); d != "" {
|
||||
deviceName = d
|
||||
deviceType = t
|
||||
}
|
||||
|
||||
if partitionMounted(deviceName, mountsFile) {
|
||||
return nil
|
||||
}
|
||||
mountsFile, err := os.Open("/proc/mounts")
|
||||
if err != nil {
|
||||
log.Errorf("failed to read /proc/mounts %s", err)
|
||||
return err
|
||||
}
|
||||
defer mountsFile.Close()
|
||||
|
||||
if partitionMounted(deviceName, mountsFile) {
|
||||
return nil
|
||||
}
|
||||
|
||||
os.MkdirAll("/bootiso", 0755)
|
||||
|
||||
// find the installation device
|
||||
cmd := exec.Command("blkid", "-L", "RancherOS")
|
||||
log.Debugf("Run(%v)", cmd)
|
||||
cmd.Stderr = os.Stderr
|
||||
out, err := cmd.Output()
|
||||
if err != nil {
|
||||
log.Errorf("Failed to get RancherOS boot device: %s", err)
|
||||
return err
|
||||
}
|
||||
deviceName = strings.TrimSpace(string(out))
|
||||
log.Debugf("blkid found -L RancherOS: %s", deviceName)
|
||||
|
||||
cmd = exec.Command("blkid", deviceName)
|
||||
log.Debugf("Run(%v)", cmd)
|
||||
cmd.Stderr = os.Stderr
|
||||
if out, err = cmd.Output(); err != nil {
|
||||
log.Errorf("Failed to get RancherOS boot device type: %s", err)
|
||||
return err
|
||||
}
|
||||
deviceType = strings.TrimSpace(string(out))
|
||||
s1 := strings.Split(deviceType, "TYPE=\"")
|
||||
s2 := strings.Split(s1[1], "\"")
|
||||
deviceType = s2[0]
|
||||
log.Debugf("blkid type of %s: %s", deviceName, deviceType)
|
||||
|
||||
cmd = exec.Command("mount", "-t", deviceType, deviceName, "/bootiso")
|
||||
log.Debugf("Run(%v)", cmd)
|
||||
cmd := exec.Command("mount", "-t", deviceType, deviceName, "/bootiso")
|
||||
log.Debugf("mount (%#v)", cmd)
|
||||
|
||||
cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
|
||||
err = cmd.Run()
|
||||
@@ -383,7 +392,7 @@ func mountBootIso() error {
|
||||
return err
|
||||
}
|
||||
|
||||
func layDownOS(image, installType, cloudConfig, device, kappend string, kexec bool) error {
|
||||
func layDownOS(image, installType, cloudConfig, device, partition, statedir, kappend string, kexec bool) error {
|
||||
// ENV == installType
|
||||
//[[ "$ARCH" == "arm" && "$ENV" != "upgrade" ]] && ENV=arm
|
||||
|
||||
@@ -396,11 +405,10 @@ func layDownOS(image, installType, cloudConfig, device, kappend string, kexec bo
|
||||
//cloudConfig := SCRIPTS_DIR + "/conf/empty.yml" //${cloudConfig:-"${SCRIPTS_DIR}/conf/empty.yml"}
|
||||
CONSOLE := "tty0"
|
||||
baseName := "/mnt/new_img"
|
||||
bootDir := "boot/"
|
||||
//# TODO: Change this to a number so that users can specify.
|
||||
//# Will need to make it so that our builds and packer APIs remain consistent.
|
||||
partition := device + "1" //${partition:=${device}1}
|
||||
kernelArgs := "rancher.state.dev=LABEL=RANCHER_STATE rancher.state.wait" // console="+CONSOLE
|
||||
kernelArgs := "printk.devkmsg=on rancher.state.dev=LABEL=RANCHER_STATE rancher.state.wait" // console="+CONSOLE
|
||||
if statedir != "" {
|
||||
kernelArgs = kernelArgs + " rancher.state.directory=" + statedir
|
||||
}
|
||||
|
||||
// unmount on trap
|
||||
defer util.Unmount(baseName)
|
||||
@@ -418,12 +426,12 @@ func layDownOS(image, installType, cloudConfig, device, kappend string, kexec bo
|
||||
case "generic":
|
||||
log.Debugf("formatAndMount")
|
||||
var err error
|
||||
device, partition, err = formatAndMount(baseName, bootDir, device, partition)
|
||||
device, partition, err = formatAndMount(baseName, device, partition)
|
||||
if err != nil {
|
||||
log.Errorf("formatAndMount %s", err)
|
||||
return err
|
||||
}
|
||||
err = installSyslinux(device, baseName, bootDir, diskType)
|
||||
err = installSyslinux(device, baseName, diskType)
|
||||
if err != nil {
|
||||
log.Errorf("installSyslinux %s", err)
|
||||
return err
|
||||
@@ -435,7 +443,7 @@ func layDownOS(image, installType, cloudConfig, device, kappend string, kexec bo
|
||||
}
|
||||
case "arm":
|
||||
var err error
|
||||
device, partition, err = formatAndMount(baseName, bootDir, device, partition)
|
||||
device, partition, err = formatAndMount(baseName, device, partition)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -445,42 +453,45 @@ func layDownOS(image, installType, cloudConfig, device, kappend string, kexec bo
|
||||
case "amazon-ebs-hvm":
|
||||
CONSOLE = "ttyS0"
|
||||
var err error
|
||||
device, partition, err = formatAndMount(baseName, bootDir, device, partition)
|
||||
device, partition, err = formatAndMount(baseName, device, partition)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if installType == "amazon-ebs-hvm" {
|
||||
installSyslinux(device, baseName, bootDir, diskType)
|
||||
installSyslinux(device, baseName, diskType)
|
||||
}
|
||||
//# AWS Networking recommends disabling.
|
||||
seedData(baseName, cloudConfig, FILES)
|
||||
case "googlecompute":
|
||||
CONSOLE = "ttyS0"
|
||||
var err error
|
||||
device, partition, err = formatAndMount(baseName, bootDir, device, partition)
|
||||
device, partition, err = formatAndMount(baseName, device, partition)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
installSyslinux(device, baseName, bootDir, diskType)
|
||||
installSyslinux(device, baseName, diskType)
|
||||
seedData(baseName, cloudConfig, FILES)
|
||||
case "noformat":
|
||||
var err error
|
||||
device, partition, err = mountdevice(baseName, bootDir, partition, false)
|
||||
device, partition, err = install.MountDevice(baseName, device, partition, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
installSyslinux(device, baseName, bootDir, diskType)
|
||||
installSyslinux(device, baseName, diskType)
|
||||
if err := os.MkdirAll(filepath.Join(baseName, statedir), 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
case "raid":
|
||||
var err error
|
||||
device, partition, err = mountdevice(baseName, bootDir, partition, false)
|
||||
device, partition, err = install.MountDevice(baseName, device, partition, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
installSyslinux(device, baseName, bootDir, diskType)
|
||||
installSyslinux(device, baseName, diskType)
|
||||
case "bootstrap":
|
||||
CONSOLE = "ttyS0"
|
||||
var err error
|
||||
device, partition, err = mountdevice(baseName, bootDir, partition, true)
|
||||
device, partition, err = install.MountDevice(baseName, device, partition, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -490,59 +501,47 @@ func layDownOS(image, installType, cloudConfig, device, kappend string, kexec bo
|
||||
fallthrough
|
||||
case "upgrade":
|
||||
var err error
|
||||
device, partition, err = mountdevice(baseName, bootDir, partition, false)
|
||||
device, partition, err = install.MountDevice(baseName, device, partition, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Debugf("upgrading - %s, %s, %s, %s", device, baseName, bootDir, diskType)
|
||||
log.Debugf("upgrading - %s, %s, %s, %s", device, baseName, diskType)
|
||||
// TODO: detect pv-grub, and don't kill it with syslinux
|
||||
upgradeBootloader(device, baseName, bootDir, diskType)
|
||||
upgradeBootloader(device, baseName, diskType)
|
||||
default:
|
||||
return fmt.Errorf("unexpected install type %s", installType)
|
||||
}
|
||||
kernelArgs = kernelArgs + " console=" + CONSOLE
|
||||
|
||||
if kappend == "" {
|
||||
preservedAppend, _ := ioutil.ReadFile(filepath.Join(baseName, bootDir+"append"))
|
||||
preservedAppend, _ := ioutil.ReadFile(filepath.Join(baseName, install.BootDir+"append"))
|
||||
kappend = string(preservedAppend)
|
||||
} else {
|
||||
ioutil.WriteFile(filepath.Join(baseName, bootDir+"append"), []byte(kappend), 0644)
|
||||
ioutil.WriteFile(filepath.Join(baseName, install.BootDir+"append"), []byte(kappend), 0644)
|
||||
}
|
||||
|
||||
if installType == "amazon-ebs-pv" {
|
||||
menu := install.BootVars{
|
||||
BaseName: baseName,
|
||||
BootDir: bootDir,
|
||||
BootDir: install.BootDir,
|
||||
Timeout: 0,
|
||||
Fallback: 0, // need to be conditional on there being a 'rollback'?
|
||||
Entries: []install.MenuEntry{
|
||||
install.MenuEntry{"RancherOS-current", bootDir, VERSION, kernelArgs, kappend},
|
||||
install.MenuEntry{"RancherOS-current", install.BootDir, VERSION, kernelArgs, kappend},
|
||||
},
|
||||
}
|
||||
install.PvGrubConfig(menu)
|
||||
}
|
||||
log.Debugf("installRancher")
|
||||
err := installRancher(baseName, bootDir, VERSION, DIST, kernelArgs+" "+kappend)
|
||||
_, err := installRancher(baseName, VERSION, DIST, kernelArgs+" "+kappend)
|
||||
if err != nil {
|
||||
log.Errorf("%s", err)
|
||||
return err
|
||||
}
|
||||
log.Debugf("installRancher done")
|
||||
|
||||
// Used by upgrade
|
||||
if kexec {
|
||||
// kexec -l ${DIST}/vmlinuz --initrd=${DIST}/initrd --append="${kernelArgs} ${APPEND}" -f
|
||||
cmd := exec.Command("kexec", "-l "+DIST+"/vmlinuz",
|
||||
"--initrd="+DIST+"/initrd",
|
||||
"--append='"+kernelArgs+" "+kappend+"'",
|
||||
"-f")
|
||||
log.Debugf("Run(%v)", cmd)
|
||||
cmd.Stderr = os.Stderr
|
||||
if _, err := cmd.Output(); err != nil {
|
||||
log.Errorf("Failed to kexec: %s", err)
|
||||
return err
|
||||
}
|
||||
log.Infof("kexec'd to new install")
|
||||
power.Kexec(false, filepath.Join(baseName, install.BootDir), kernelArgs+" "+kappend)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -556,7 +555,7 @@ func seedData(baseName, cloudData string, files []string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = os.MkdirAll(filepath.Join(baseName, "/var/lib/rancher/conf/cloud-config.d"), 0755); err != nil {
|
||||
if err = os.MkdirAll(filepath.Join(baseName, "/var/lib/rancher/conf/cloud-config.d"), 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -682,11 +681,7 @@ func setDiskpartitions(device, diskType string) error {
|
||||
log.Errorf("parted: %s", err)
|
||||
return err
|
||||
}
|
||||
if err := setBootable(device, diskType); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
return setBootable(device, diskType)
|
||||
}
|
||||
|
||||
func partitionMounted(device string, file io.Reader) bool {
|
||||
@@ -715,7 +710,7 @@ func formatdevice(device, partition string) error {
|
||||
// -O ^64bit: for syslinux: http://www.syslinux.org/wiki/index.php?title=Filesystem#ext
|
||||
cmd := exec.Command("mkfs.ext4", "-F", "-i", "4096", "-O", "^64bit", "-L", "RANCHER_STATE", partition)
|
||||
log.Debugf("Run(%v)", cmd)
|
||||
//cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
|
||||
cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
log.Errorf("mkfs.ext4: %s", err)
|
||||
return err
|
||||
@@ -723,57 +718,7 @@ func formatdevice(device, partition string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func mountdevice(baseName, bootDir, partition string, raw bool) (string, string, error) {
|
||||
log.Debugf("mountdevice %s, raw %v", partition, raw)
|
||||
|
||||
if raw {
|
||||
log.Debugf("util.Mount (raw) %s, %s", partition, baseName)
|
||||
|
||||
cmd := exec.Command("lsblk", "-no", "pkname", partition)
|
||||
log.Debugf("Run(%v)", cmd)
|
||||
cmd.Stderr = os.Stderr
|
||||
device := ""
|
||||
if out, err := cmd.Output(); err == nil {
|
||||
device = "/dev/" + strings.TrimSpace(string(out))
|
||||
}
|
||||
|
||||
return device, partition, util.Mount(partition, baseName, "", "")
|
||||
}
|
||||
|
||||
//rootfs := partition
|
||||
// Don't use ResolveDevice - it can fail, whereas `blkid -L LABEL` works more often
|
||||
//if dev := util.ResolveDevice("LABEL=RANCHER_BOOT"); dev != "" {
|
||||
cmd := exec.Command("blkid", "-L", "RANCHER_BOOT")
|
||||
log.Debugf("Run(%v)", cmd)
|
||||
cmd.Stderr = os.Stderr
|
||||
if out, err := cmd.Output(); err == nil {
|
||||
partition = strings.TrimSpace(string(out))
|
||||
baseName = filepath.Join(baseName, "boot")
|
||||
} else {
|
||||
cmd := exec.Command("blkid", "-L", "RANCHER_STATE")
|
||||
log.Debugf("Run(%v)", cmd)
|
||||
cmd.Stderr = os.Stderr
|
||||
if out, err := cmd.Output(); err == nil {
|
||||
partition = strings.TrimSpace(string(out))
|
||||
}
|
||||
}
|
||||
device := ""
|
||||
cmd = exec.Command("lsblk", "-no", "pkname", partition)
|
||||
log.Debugf("Run(%v)", cmd)
|
||||
cmd.Stderr = os.Stderr
|
||||
if out, err := cmd.Output(); err == nil {
|
||||
device = "/dev/" + strings.TrimSpace(string(out))
|
||||
}
|
||||
|
||||
log.Debugf("util.Mount %s, %s", partition, baseName)
|
||||
os.MkdirAll(baseName, 0755)
|
||||
cmd = exec.Command("mount", partition, baseName)
|
||||
log.Debugf("Run(%v)", cmd)
|
||||
//cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
|
||||
return device, partition, cmd.Run()
|
||||
}
|
||||
|
||||
func formatAndMount(baseName, bootDir, device, partition string) (string, string, error) {
|
||||
func formatAndMount(baseName, device, partition string) (string, string, error) {
|
||||
log.Debugf("formatAndMount")
|
||||
|
||||
err := formatdevice(device, partition)
|
||||
@@ -781,31 +726,14 @@ func formatAndMount(baseName, bootDir, device, partition string) (string, string
|
||||
log.Errorf("formatdevice %s", err)
|
||||
return device, partition, err
|
||||
}
|
||||
device, partition, err = mountdevice(baseName, bootDir, partition, false)
|
||||
device, partition, err = install.MountDevice(baseName, device, partition, false)
|
||||
if err != nil {
|
||||
log.Errorf("mountdevice %s", err)
|
||||
return device, partition, err
|
||||
}
|
||||
//err = createbootDirs(baseName, bootDir)
|
||||
//if err != nil {
|
||||
// log.Errorf("createbootDirs %s", err)
|
||||
// return bootDir, err
|
||||
//}
|
||||
return device, partition, nil
|
||||
}
|
||||
|
||||
func NOPEcreatebootDir(baseName, bootDir string) error {
|
||||
log.Debugf("createbootDirs")
|
||||
|
||||
if err := os.MkdirAll(filepath.Join(baseName, bootDir+"grub"), 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := os.MkdirAll(filepath.Join(baseName, bootDir+"syslinux"), 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func setBootable(device, diskType string) error {
|
||||
// TODO make conditional - if there is a bootable device already, don't break it
|
||||
// TODO: make RANCHER_BOOT bootable - it might not be device 1
|
||||
@@ -824,10 +752,10 @@ func setBootable(device, diskType string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func upgradeBootloader(device, baseName, bootDir, diskType string) error {
|
||||
func upgradeBootloader(device, baseName, diskType string) error {
|
||||
log.Debugf("start upgradeBootloader")
|
||||
|
||||
grubDir := filepath.Join(baseName, bootDir+"grub")
|
||||
grubDir := filepath.Join(baseName, install.BootDir+"grub")
|
||||
if _, err := os.Stat(grubDir); os.IsNotExist(err) {
|
||||
log.Debugf("%s does not exist - no need to upgrade bootloader", grubDir)
|
||||
// we've already upgraded
|
||||
@@ -835,12 +763,12 @@ func upgradeBootloader(device, baseName, bootDir, diskType string) error {
|
||||
return nil
|
||||
}
|
||||
// deal with systems which were previously upgraded, then rolled back, and are now being re-upgraded
|
||||
grubBackup := filepath.Join(baseName, bootDir+"grub_backup")
|
||||
grubBackup := filepath.Join(baseName, install.BootDir+"grub_backup")
|
||||
if err := os.RemoveAll(grubBackup); err != nil {
|
||||
log.Errorf("RemoveAll (%s): %s", grubBackup, err)
|
||||
return err
|
||||
}
|
||||
backupSyslinuxDir := filepath.Join(baseName, bootDir+"syslinux_backup")
|
||||
backupSyslinuxDir := filepath.Join(baseName, install.BootDir+"syslinux_backup")
|
||||
if _, err := os.Stat(backupSyslinuxDir); !os.IsNotExist(err) {
|
||||
backupSyslinuxLdlinuxSys := filepath.Join(backupSyslinuxDir, "ldlinux.sys")
|
||||
if _, err := os.Stat(backupSyslinuxLdlinuxSys); !os.IsNotExist(err) {
|
||||
@@ -863,7 +791,7 @@ func upgradeBootloader(device, baseName, bootDir, diskType string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
syslinuxDir := filepath.Join(baseName, bootDir+"syslinux")
|
||||
syslinuxDir := filepath.Join(baseName, install.BootDir+"syslinux")
|
||||
// it seems that v0.5.0 didn't have a syslinux dir, while 0.7 does
|
||||
if _, err := os.Stat(syslinuxDir); !os.IsNotExist(err) {
|
||||
if err := os.Rename(syslinuxDir, backupSyslinuxDir); err != nil {
|
||||
@@ -884,14 +812,15 @@ func upgradeBootloader(device, baseName, bootDir, diskType string) error {
|
||||
|
||||
cfg = strings.Replace(cfg, "current", "previous", -1)
|
||||
// TODO consider removing the APPEND line - as the global.cfg should have the same result
|
||||
ioutil.WriteFile(filepath.Join(baseName, bootDir, "linux-current.cfg"), []byte(cfg), 0644)
|
||||
ioutil.WriteFile(filepath.Join(baseName, install.BootDir, "linux-current.cfg"), []byte(cfg), 0644)
|
||||
|
||||
lines := strings.Split(cfg, "\n")
|
||||
for _, line := range lines {
|
||||
line = strings.TrimSpace(line)
|
||||
if strings.HasPrefix(line, "APPEND") {
|
||||
log.Errorf("write new (%s) %s", filepath.Join(baseName, install.BootDir, "global.cfg"), err)
|
||||
// TODO: need to append any extra's the user specified
|
||||
ioutil.WriteFile(filepath.Join(baseName, bootDir, "global.cfg"), []byte(cfg), 0644)
|
||||
ioutil.WriteFile(filepath.Join(baseName, install.BootDir, "global.cfg"), []byte(cfg), 0644)
|
||||
break
|
||||
}
|
||||
}
|
||||
@@ -899,10 +828,11 @@ func upgradeBootloader(device, baseName, bootDir, diskType string) error {
|
||||
}
|
||||
}
|
||||
|
||||
return installSyslinux(device, baseName, bootDir, diskType)
|
||||
return installSyslinux(device, baseName, diskType)
|
||||
}
|
||||
|
||||
func installSyslinux(device, baseName, bootDir, diskType string) error {
|
||||
func installSyslinux(device, baseName, diskType string) error {
|
||||
log.Debugf("installSyslinux(%s)", device)
|
||||
|
||||
mbrFile := "mbr.bin"
|
||||
if diskType == "gpt" {
|
||||
@@ -951,7 +881,7 @@ func installSyslinux(device, baseName, bootDir, diskType string) error {
|
||||
}
|
||||
}
|
||||
|
||||
sysLinuxDir := filepath.Join(baseName, bootDir, "syslinux")
|
||||
sysLinuxDir := filepath.Join(baseName, install.BootDir, "syslinux")
|
||||
if err := os.MkdirAll(sysLinuxDir, 0755); err != nil {
|
||||
log.Errorf("MkdirAll(%s)): %s", sysLinuxDir, err)
|
||||
//return err
|
||||
@@ -984,19 +914,45 @@ func installSyslinux(device, baseName, bootDir, diskType string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func installRancher(baseName, bootDir, VERSION, DIST, kappend string) error {
func different(existing, new string) bool {
	// assume existing file exists
	if _, err := os.Stat(new); os.IsNotExist(err) {
		return true
	}
	data, err := ioutil.ReadFile(existing)
	if err != nil {
		return true
	}
	newData, err := ioutil.ReadFile(new)
	if err != nil {
		return true
	}
	md5sum := md5.Sum(data)
	newmd5sum := md5.Sum(newData)
	if md5sum != newmd5sum {
		return true
	}
	return false
}
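A standalone sketch of the md5-based comparison that different() performs before linux-current.cfg is rotated to linux-previous.cfg; the paths used in main are hypothetical:

```go
package main

import (
	"crypto/md5"
	"fmt"
	"io/ioutil"
)

// filesDiffer mirrors different(): a missing or unreadable file counts as
// "different", otherwise the md5 sums of both files are compared.
func filesDiffer(existing, candidate string) bool {
	a, err := ioutil.ReadFile(existing)
	if err != nil {
		return true
	}
	b, err := ioutil.ReadFile(candidate)
	if err != nil {
		return true
	}
	return md5.Sum(a) != md5.Sum(b)
}

func main() {
	// Hypothetical paths; in the installer the comparison is between the
	// installed cfg and the one shipped in the new image.
	fmt.Println(filesDiffer("/tmp/current.cfg", "/tmp/new.cfg"))
}
```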
func installRancher(baseName, VERSION, DIST, kappend string) (string, error) {
|
||||
log.Debugf("installRancher")
|
||||
|
||||
// detect if there already is a linux-current.cfg, if so, move it to linux-previous.cfg,
|
||||
currentCfg := filepath.Join(baseName, bootDir, "linux-current.cfg")
|
||||
currentCfg := filepath.Join(baseName, install.BootDir, "linux-current.cfg")
|
||||
if _, err := os.Stat(currentCfg); !os.IsNotExist(err) {
|
||||
previousCfg := filepath.Join(baseName, bootDir, "linux-previous.cfg")
|
||||
if _, err := os.Stat(previousCfg); !os.IsNotExist(err) {
|
||||
if err := os.Remove(previousCfg); err != nil {
|
||||
return err
|
||||
existingCfg := filepath.Join(DIST, "linux-current.cfg")
|
||||
// only remove previous if there is a change to the current
|
||||
if different(currentCfg, existingCfg) {
|
||||
previousCfg := filepath.Join(baseName, install.BootDir, "linux-previous.cfg")
|
||||
if _, err := os.Stat(previousCfg); !os.IsNotExist(err) {
|
||||
if err := os.Remove(previousCfg); err != nil {
|
||||
return currentCfg, err
|
||||
}
|
||||
}
|
||||
os.Rename(currentCfg, previousCfg)
|
||||
// TODO: now that we're parsing syslinux.cfg files, maybe we can delete old kernels and initrds
|
||||
}
|
||||
os.Rename(currentCfg, previousCfg)
|
||||
}
|
||||
|
||||
// The image/ISO have all the files in it - the syslinux cfg's and the kernel&initrd, so we can copy them all from there
|
||||
@@ -1005,26 +961,36 @@ func installRancher(baseName, bootDir, VERSION, DIST, kappend string) error {
|
||||
if file.IsDir() {
|
||||
continue
|
||||
}
|
||||
if err := dfs.CopyFile(filepath.Join(DIST, file.Name()), filepath.Join(baseName, bootDir), file.Name()); err != nil {
|
||||
// TODO: should overwrite anything other than the global.cfg
|
||||
overwrite := true
|
||||
if file.Name() == "global.cfg" {
|
||||
overwrite = false
|
||||
}
|
||||
if err := dfs.CopyFileOverwrite(filepath.Join(DIST, file.Name()), filepath.Join(baseName, install.BootDir), file.Name(), overwrite); err != nil {
|
||||
log.Errorf("copy %s: %s", file.Name(), err)
|
||||
//return err
|
||||
}
|
||||
log.Debugf("copied %s to %s as %s", filepath.Join(DIST, file.Name()), filepath.Join(baseName, bootDir), file.Name())
|
||||
}
|
||||
|
||||
// the general INCLUDE syslinuxcfg
|
||||
if err := dfs.CopyFile(filepath.Join(DIST, "isolinux", "isolinux.cfg"), filepath.Join(baseName, bootDir, "syslinux"), "syslinux.cfg"); err != nil {
|
||||
isolinuxFile := filepath.Join(DIST, "isolinux", "isolinux.cfg")
|
||||
syslinuxDir := filepath.Join(baseName, install.BootDir, "syslinux")
|
||||
if err := dfs.CopyFileOverwrite(isolinuxFile, syslinuxDir, "syslinux.cfg", true); err != nil {
|
||||
log.Errorf("copy global syslinux.cfgS%s: %s", "syslinux.cfg", err)
|
||||
//return err
|
||||
} else {
|
||||
log.Debugf("installRancher copy global syslinux.cfgS OK")
|
||||
|
||||
}
|
||||
|
||||
// The global.cfg INCLUDE - useful for over-riding the APPEND line
|
||||
globalFile := filepath.Join(filepath.Join(baseName, bootDir), "global.cfg")
|
||||
globalFile := filepath.Join(filepath.Join(baseName, install.BootDir), "global.cfg")
|
||||
if _, err := os.Stat(globalFile); !os.IsNotExist(err) {
|
||||
err := ioutil.WriteFile(globalFile, []byte("APPEND "+kappend), 0644)
|
||||
if err != nil {
|
||||
log.Errorf("write (%s) %s", "global.cfg", err)
|
||||
return err
|
||||
return currentCfg, err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
return currentCfg, nil
|
||||
}
|
||||
|
||||
@@ -1,5 +1,18 @@
|
||||
package install
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/rancher/os/config"
|
||||
"github.com/rancher/os/log"
|
||||
"github.com/rancher/os/util"
|
||||
)
|
||||
|
||||
const BootDir = "boot/"
|
||||
|
||||
type MenuEntry struct {
|
||||
Name, BootDir, Version, KernelArgs, Append string
|
||||
}
|
||||
@@ -9,3 +22,55 @@ type BootVars struct {
|
||||
Fallback int
|
||||
Entries []MenuEntry
|
||||
}
|
||||
|
||||
func MountDevice(baseName, device, partition string, raw bool) (string, string, error) {
|
||||
log.Debugf("mountdevice %s, raw %v", partition, raw)
|
||||
|
||||
if partition == "" {
|
||||
if raw {
|
||||
log.Debugf("util.Mount (raw) %s, %s", partition, baseName)
|
||||
|
||||
cmd := exec.Command("lsblk", "-no", "pkname", partition)
|
||||
log.Debugf("Run(%v)", cmd)
|
||||
cmd.Stderr = os.Stderr
|
||||
device := ""
|
||||
// TODO: out can == "" - this is used to "detect software RAID" which is terrible
|
||||
if out, err := cmd.Output(); err == nil {
|
||||
device = "/dev/" + strings.TrimSpace(string(out))
|
||||
}
|
||||
|
||||
log.Debugf("mountdevice return -> d: %s, p: %s", device, partition)
|
||||
return device, partition, util.Mount(partition, baseName, "", "")
|
||||
}
|
||||
|
||||
//rootfs := partition
|
||||
// Don't use ResolveDevice - it can fail, whereas `blkid -L LABEL` works more often
|
||||
|
||||
cfg := config.LoadConfig()
|
||||
if d, _ := util.Blkid("RANCHER_BOOT"); d != "" {
|
||||
partition = d
|
||||
baseName = filepath.Join(baseName, BootDir)
|
||||
} else {
|
||||
if dev := util.ResolveDevice(cfg.Rancher.State.Dev); dev != "" {
|
||||
// try the rancher.state.dev setting
|
||||
partition = dev
|
||||
} else {
|
||||
if d, _ := util.Blkid("RANCHER_STATE"); d != "" {
|
||||
partition = d
|
||||
}
|
||||
}
|
||||
}
|
||||
cmd := exec.Command("lsblk", "-no", "pkname", partition)
|
||||
log.Debugf("Run(%v)", cmd)
|
||||
cmd.Stderr = os.Stderr
|
||||
// TODO: out can == "" - this is used to "detect software RAID" which is terrible
|
||||
if out, err := cmd.Output(); err == nil {
|
||||
device = "/dev/" + strings.TrimSpace(string(out))
|
||||
}
|
||||
}
|
||||
os.MkdirAll(baseName, 0755)
|
||||
cmd := exec.Command("mount", partition, baseName)
|
||||
//cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
|
||||
log.Debugf("mountdevice return2 -> d: %s, p: %s", device, partition)
|
||||
return device, partition, cmd.Run()
|
||||
}
|
||||
|
||||
@@ -1,9 +1,13 @@
|
||||
package install
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"html/template"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/rancher/os/log"
|
||||
)
|
||||
@@ -43,3 +47,48 @@ DEFAULT RancherOS-current
	}
	return nil
}

func ReadGlobalCfg(globalCfg string) (string, error) {
	append := ""
	buf, err := ioutil.ReadFile(globalCfg)
	if err != nil {
		return append, err
	}

	s := bufio.NewScanner(bytes.NewReader(buf))
	for s.Scan() {
		line := strings.TrimSpace(s.Text())
		if strings.HasPrefix(line, "APPEND") {
			append = strings.TrimSpace(strings.TrimPrefix(line, "APPEND"))
		}
	}
	return append, nil
}

func ReadSyslinuxCfg(currentCfg string) (string, string, error) {
	vmlinuzFile := ""
	initrdFile := ""
	// Need to parse currentCfg for the lines:
	// KERNEL ../vmlinuz-4.9.18-rancher^M
	// INITRD ../initrd-41e02e6-dirty^M
	buf, err := ioutil.ReadFile(currentCfg)
	if err != nil {
		return vmlinuzFile, initrdFile, err
	}

	DIST := filepath.Dir(currentCfg)

	s := bufio.NewScanner(bytes.NewReader(buf))
	for s.Scan() {
		line := strings.TrimSpace(s.Text())
		if strings.HasPrefix(line, "KERNEL") {
			vmlinuzFile = strings.TrimSpace(strings.TrimPrefix(line, "KERNEL"))
			vmlinuzFile = filepath.Join(DIST, filepath.Base(vmlinuzFile))
		}
		if strings.HasPrefix(line, "INITRD") {
			initrdFile = strings.TrimSpace(strings.TrimPrefix(line, "INITRD"))
			initrdFile = filepath.Join(DIST, filepath.Base(initrdFile))
		}
	}
	return vmlinuzFile, initrdFile, err
}
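A usage sketch of the KERNEL/INITRD parsing in ReadSyslinuxCfg, run on an in-memory config rather than a file; the sample config text is an assumption based on the comment in the function above:

```go
package main

import (
	"bufio"
	"fmt"
	"path/filepath"
	"strings"
)

// parseSyslinuxCfg extracts the KERNEL and INITRD paths from a syslinux cfg,
// joining them against the directory holding the cfg, like ReadSyslinuxCfg.
func parseSyslinuxCfg(dir, cfg string) (vmlinuz, initrd string) {
	s := bufio.NewScanner(strings.NewReader(cfg))
	for s.Scan() {
		line := strings.TrimSpace(s.Text())
		if strings.HasPrefix(line, "KERNEL") {
			vmlinuz = filepath.Join(dir, filepath.Base(strings.TrimSpace(strings.TrimPrefix(line, "KERNEL"))))
		}
		if strings.HasPrefix(line, "INITRD") {
			initrd = filepath.Join(dir, filepath.Base(strings.TrimSpace(strings.TrimPrefix(line, "INITRD"))))
		}
	}
	return vmlinuz, initrd
}

func main() {
	cfg := "KERNEL ../vmlinuz-4.9.18-rancher\nINITRD ../initrd-41e02e6-dirty\nAPPEND rancher.state.wait\n"
	k, i := parseSyslinuxCfg("/boot", cfg)
	fmt.Println(k) // /boot/vmlinuz-4.9.18-rancher
	fmt.Println(i) // /boot/initrd-41e02e6-dirty
}
```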
@@ -6,6 +6,7 @@ import (
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
@@ -52,7 +53,7 @@ func osSubcommands() []cli.Command {
|
||||
Usage: "do not reboot after upgrade",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "kexec",
|
||||
Name: "kexec, k",
|
||||
Usage: "reboot using kexec",
|
||||
},
|
||||
cli.StringFlag{
|
||||
@@ -63,6 +64,10 @@ func osSubcommands() []cli.Command {
|
||||
Name: "upgrade-console",
|
||||
Usage: "upgrade console even if persistent",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "debug",
|
||||
Usage: "Run installer with debug output",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -167,6 +172,10 @@ func getLatestImage() (string, error) {
|
||||
}
|
||||
|
||||
func osUpgrade(c *cli.Context) error {
|
||||
if runtime.GOARCH != "amd64" {
|
||||
log.Fatalf("ros install / upgrade only supported on 'amd64', not '%s'", runtime.GOARCH)
|
||||
}
|
||||
|
||||
image := c.String("image")
|
||||
|
||||
if image == "" {
|
||||
@@ -182,7 +191,16 @@ func osUpgrade(c *cli.Context) error {
|
||||
if c.Args().Present() {
|
||||
log.Fatalf("invalid arguments %v", c.Args())
|
||||
}
|
||||
if err := startUpgradeContainer(image, c.Bool("stage"), c.Bool("force"), !c.Bool("no-reboot"), c.Bool("kexec"), c.Bool("upgrade-console"), c.String("append")); err != nil {
|
||||
if err := startUpgradeContainer(
|
||||
image,
|
||||
c.Bool("stage"),
|
||||
c.Bool("force"),
|
||||
!c.Bool("no-reboot"),
|
||||
c.Bool("kexec"),
|
||||
c.Bool("upgrade-console"),
|
||||
c.Bool("debug"),
|
||||
c.String("append"),
|
||||
); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
@@ -194,14 +212,17 @@ func osVersion(c *cli.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func startUpgradeContainer(image string, stage, force, reboot, kexec bool, upgradeConsole bool, kernelArgs string) error {
|
||||
func startUpgradeContainer(image string, stage, force, reboot, kexec, debug bool, upgradeConsole bool, kernelArgs string) error {
|
||||
command := []string{
|
||||
"-t", "rancher-upgrade",
|
||||
"-r", config.Version,
|
||||
}
|
||||
|
||||
if kexec {
|
||||
command = append(command, "-k")
|
||||
command = append(command, "--kexec")
|
||||
}
|
||||
if debug {
|
||||
command = append(command, "--debug")
|
||||
}
|
||||
|
||||
kernelArgs = strings.TrimSpace(kernelArgs)
|
||||
|
||||
23
cmd/control/recovery_init.go
Normal file
@@ -0,0 +1,23 @@
|
||||
package control
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"syscall"
|
||||
|
||||
log "github.com/Sirupsen/logrus"
|
||||
"github.com/codegangsta/cli"
|
||||
)
|
||||
|
||||
func recoveryInitAction(c *cli.Context) error {
|
||||
if err := writeRespawn("root", false, true); err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
respawnBinPath, err := exec.LookPath("respawn")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return syscall.Exec(respawnBinPath, []string{"respawn", "-f", "/etc/respawn.conf"}, os.Environ())
|
||||
}
|
||||
@@ -11,13 +11,11 @@ import (
|
||||
func selinuxCommand() cli.Command {
|
||||
app := cli.Command{}
|
||||
app.Name = "selinux"
|
||||
app.Usage = "Launch SELinux tools container."
|
||||
app.Action = func(c *cli.Context) error {
|
||||
argv := []string{"system-docker", "run", "-it", "--privileged", "--rm",
|
||||
"--net", "host", "--pid", "host", "--ipc", "host",
|
||||
"-v", "/usr/bin/docker:/usr/bin/docker.dist:ro",
|
||||
"-v", "/usr/bin/ros:/usr/bin/dockerlaunch:ro",
|
||||
"-v", "/usr/bin/ros:/usr/bin/user-docker:ro",
|
||||
"-v", "/usr/bin/ros:/usr/bin/system-docker:ro",
|
||||
"-v", "/usr/bin/ros:/sbin/poweroff:ro",
|
||||
"-v", "/usr/bin/ros:/sbin/reboot:ro",
|
||||
|
||||
@@ -36,7 +36,6 @@ func Commands() cli.Command {
|
||||
app := cli.Command{}
|
||||
app.Name = "service"
|
||||
app.ShortName = "s"
|
||||
app.Usage = "Command line interface for services and compose."
|
||||
app.Before = beforeApp
|
||||
app.Flags = append(dockerApp.DockerClientFlags(), cli.BoolFlag{
|
||||
Name: "verbose,debug",
|
||||
@@ -208,9 +207,17 @@ func IsLocalOrURL(service string) bool {
|
||||
return isLocal(service) || strings.HasPrefix(service, "http:/") || strings.HasPrefix(service, "https:/")
|
||||
}
|
||||
|
||||
func validateService(service string, cfg *config.CloudConfig) {
|
||||
// ValidService checks to see if the service definition exists
|
||||
func ValidService(service string, cfg *config.CloudConfig) bool {
|
||||
services := availableService(cfg)
|
||||
if !IsLocalOrURL(service) && !util.Contains(services, service) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func validateService(service string, cfg *config.CloudConfig) {
|
||||
if !ValidService(service, cfg) {
|
||||
log.Fatalf("%s is not a valid service", service)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -76,11 +76,7 @@ func writeCerts(generateServer bool, hostname []string, certPath, keyPath, caCer
|
||||
if err := config.Set("rancher.docker.server_cert", string(cert)); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := config.Set("rancher.docker.server_key", string(key)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
return config.Set("rancher.docker.server_key", string(key))
|
||||
}
|
||||
|
||||
func writeCaCerts(cfg *config.CloudConfig, caCertPath, caKeyPath string) error {
|
||||
|
||||
@@ -25,7 +25,6 @@ import (
|
||||
const (
|
||||
defaultStorageContext = "console"
|
||||
dockerPidFile = "/var/run/docker.pid"
|
||||
userDocker = "user-docker"
|
||||
sourceDirectory = "/engine"
|
||||
destDirectory = "/var/lib/rancher/engine"
|
||||
)
|
||||
|
||||
24
cmd/network/network.go
Normal file → Executable file
@@ -1,6 +1,9 @@
|
||||
package network
|
||||
|
||||
import (
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"github.com/rancher/os/docker"
|
||||
"github.com/rancher/os/log"
|
||||
|
||||
"github.com/docker/libnetwork/resolvconf"
|
||||
@@ -11,15 +14,25 @@ import (
|
||||
|
||||
func Main() {
|
||||
log.InitLogger()
|
||||
log.Infof("Running network")
|
||||
|
||||
cfg := config.LoadConfig()
|
||||
ApplyNetworkConfig(cfg)
|
||||
|
||||
log.Infof("Restart syslog")
|
||||
client, err := docker.NewSystemClient()
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
if err := client.ContainerRestart(context.Background(), "syslog", 10); err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
select {}
|
||||
}
|
||||
|
||||
func ApplyNetworkConfig(cfg *config.CloudConfig) {
|
||||
log.Infof("Apply Network Config")
|
||||
nameservers := cfg.Rancher.Network.DNS.Nameservers
|
||||
search := cfg.Rancher.Network.DNS.Search
|
||||
userSetDNS := len(nameservers) > 0 || len(search) > 0
|
||||
@@ -28,6 +41,8 @@ func ApplyNetworkConfig(cfg *config.CloudConfig) {
|
||||
search = cfg.Rancher.Defaults.Network.DNS.Search
|
||||
}
|
||||
|
||||
// TODO: don't write to the file if nameservers is still empty
|
||||
log.Infof("Writing resolv.conf (%v) %v", nameservers, search)
|
||||
if _, err := resolvconf.Build("/etc/resolv.conf", nameservers, search, nil); err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
@@ -36,15 +51,12 @@ func ApplyNetworkConfig(cfg *config.CloudConfig) {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
if err := netconf.ApplyNetworkConfigs(&cfg.Rancher.Network); err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
userSetHostname := cfg.Hostname != ""
|
||||
if err := netconf.RunDhcp(&cfg.Rancher.Network, !userSetHostname, !userSetDNS); err != nil {
|
||||
if err := netconf.ApplyNetworkConfigs(&cfg.Rancher.Network, userSetHostname, userSetDNS); err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
log.Infof("Apply Network Config SyncHostname")
|
||||
if err := hostname.SyncHostname(); err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
@@ -2,23 +2,30 @@ package power
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"github.com/docker/engine-api/types"
|
||||
"github.com/docker/engine-api/types/container"
|
||||
"github.com/docker/engine-api/types/filters"
|
||||
"github.com/rancher/os/cmd/control/install"
|
||||
"github.com/rancher/os/config"
|
||||
"github.com/rancher/os/log"
|
||||
|
||||
"github.com/rancher/os/docker"
|
||||
"github.com/rancher/os/util"
|
||||
)
|
||||
|
||||
// You can't shutdown the system from a process in console because we want to stop the console container.
|
||||
// If you do that you kill yourself. So we spawn a separate container to do power operations
|
||||
// This can up because on shutdown we want ssh to gracefully die, terminating ssh connections and not just hanging tcp session
|
||||
func runDocker(name string) error {
|
||||
if os.ExpandEnv("${IN_DOCKER}") == "true" {
|
||||
return nil
|
||||
@@ -76,21 +83,35 @@ func runDocker(name string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
go func() {
|
||||
client.ContainerAttach(context.Background(), types.ContainerAttachOptions{
|
||||
ContainerID: powerContainer.ID,
|
||||
Stream: true,
|
||||
Stderr: true,
|
||||
Stdout: true,
|
||||
})
|
||||
}()
|
||||
|
||||
err = client.ContainerStart(context.Background(), powerContainer.ID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = client.ContainerWait(context.Background(), powerContainer.ID)
|
||||
reader, err := client.ContainerLogs(context.Background(), types.ContainerLogsOptions{
|
||||
ContainerID: powerContainer.ID,
|
||||
ShowStderr: true,
|
||||
ShowStdout: true,
|
||||
Follow: true,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
for {
|
||||
p := make([]byte, 4096)
|
||||
n, err := reader.Read(p)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
if n == 0 {
|
||||
reader.Close()
|
||||
break
|
||||
}
|
||||
}
|
||||
if n > 0 {
|
||||
fmt.Print(string(p))
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
@@ -100,40 +121,72 @@ func runDocker(name string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func common(name string) {
func reboot(name string, force bool, code uint) {
    if os.Geteuid() != 0 {
        log.Fatalf("%s: Need to be root", os.Args[0])
    }

    if err := runDocker(name); err != nil {
        log.Fatal(err)
    // Add shutdown timeout
    cfg := config.LoadConfig()
    timeoutValue := cfg.Rancher.ShutdownTimeout
    if timeoutValue == 0 {
        timeoutValue = 60
    }
}
    if timeoutValue < 5 {
        timeoutValue = 5
    }
    log.Infof("Setting %s timeout to %d (rancher.shutdown_timeout set to %d)", os.Args[0], timeoutValue, cfg.Rancher.ShutdownTimeout)

func Off() {
    common("poweroff")
    reboot(syscall.LINUX_REBOOT_CMD_POWER_OFF)
}
    go func() {
        timeout := time.After(time.Duration(timeoutValue) * time.Second)
        tick := time.Tick(100 * time.Millisecond)
        // Keep trying until we're timed out or got a result or got an error
        for {
            select {
            // Got a timeout! fail with a timeout error
            case <-timeout:
                log.Errorf("Container shutdown taking too long, forcing %s.", os.Args[0])
                syscall.Sync()
                syscall.Reboot(int(code))
            case <-tick:
                fmt.Printf(".")
            }
        }
    }()

func Reboot() {
    common("reboot")
    reboot(syscall.LINUX_REBOOT_CMD_RESTART)
}
    // reboot -f should work even when system-docker is having problems
    if !force {
        if kexecFlag || previouskexecFlag || kexecAppendFlag != "" {
            // pass through the cmdline args
            name = ""
        }
        if err := runDocker(name); err != nil {
            log.Fatal(err)
        }
    }

func Halt() {
    common("halt")
    reboot(syscall.LINUX_REBOOT_CMD_HALT)
}
    if kexecFlag || previouskexecFlag || kexecAppendFlag != "" {
        // need to mount boot dir, or `system-docker run -v /:/host -w /host/boot` ?
        baseName := "/mnt/new_img"
        _, _, err := install.MountDevice(baseName, "", "", false)
        if err != nil {
            log.Errorf("ERROR: can't Kexec: %s", err)
            return
        }
        defer util.Unmount(baseName)
        Kexec(previouskexecFlag, filepath.Join(baseName, install.BootDir), kexecAppendFlag)
        return
    }

func reboot(code uint) {
    err := shutDownContainers()
    if err != nil {
        log.Error(err)
    if !force {
        err := shutDownContainers()
        if err != nil {
            log.Error(err)
        }
    }

    syscall.Sync()

    err = syscall.Reboot(int(code))
    err := syscall.Reboot(int(code))
    if err != nil {
        log.Fatal(err)
    }
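The change above adds a shutdown watchdog: container shutdown runs in the foreground while a goroutine forces the reboot syscall once `rancher.shutdown_timeout` elapses. Below is a minimal, self-contained sketch of that timeout pattern; the function names and the use of the standard `log` package are stand-ins, not the project's code.

```go
package main

import (
    "fmt"
    "log"
    "time"
)

// shutdownWithTimeout runs doShutdown, but calls force if it has not
// finished before timeoutSec seconds have passed.
func shutdownWithTimeout(timeoutSec int, doShutdown func() error, force func()) {
    done := make(chan error, 1)
    go func() { done <- doShutdown() }()

    timeout := time.After(time.Duration(timeoutSec) * time.Second)
    tick := time.NewTicker(100 * time.Millisecond)
    defer tick.Stop()

    for {
        select {
        case err := <-done:
            if err != nil {
                log.Printf("shutdown finished with error: %v", err)
            }
            return
        case <-timeout:
            log.Print("container shutdown taking too long, forcing power operation")
            force()
            return
        case <-tick.C:
            fmt.Print(".") // same progress dots as in the diff
        }
    }
}

func main() {
    shutdownWithTimeout(2,
        func() error {
            time.Sleep(500 * time.Millisecond) // stand-in for stopping containers
            return nil
        },
        func() {
            log.Print("would sync and call the reboot syscall here")
        })
}
```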
@@ -187,27 +240,63 @@ func shutDownContainers() error {
    }

    var stopErrorStrings []string
    consoleContainerIdx := -1

    for _, container := range containers {
    for idx, container := range containers {
        if container.ID == currentContainerID {
            continue
        }
        if container.Names[0] == "/console" {
            consoleContainerIdx = idx
            continue
        }

        log.Infof("Stopping %s : %v", container.ID[:12], container.Names)
        log.Infof("Stopping %s : %s", container.Names[0], container.ID[:12])
        stopErr := client.ContainerStop(context.Background(), container.ID, timeout)
        if stopErr != nil {
            log.Errorf("------- Error Stopping %s : %s", container.Names[0], stopErr.Error())
            stopErrorStrings = append(stopErrorStrings, " ["+container.ID+"] "+stopErr.Error())
        }
    }

    // lets see what containers are still running and only wait on those
    containers, err = client.ContainerList(context.Background(), opts)
    if err != nil {
        return err
    }

    var waitErrorStrings []string

    for _, container := range containers {
    for idx, container := range containers {
        if container.ID == currentContainerID {
            continue
        }
        if container.Names[0] == "/console" {
            consoleContainerIdx = idx
            continue
        }
        log.Infof("Waiting %s : %s", container.Names[0], container.ID[:12])
        _, waitErr := client.ContainerWait(context.Background(), container.ID)
        if waitErr != nil {
            log.Errorf("------- Error Waiting %s : %s", container.Names[0], waitErr.Error())
            waitErrorStrings = append(waitErrorStrings, " ["+container.ID+"] "+waitErr.Error())
        }
    }

    // and now stop the console
    if consoleContainerIdx != -1 {
        container := containers[consoleContainerIdx]
        log.Infof("Console Stopping %v : %s", container.Names, container.ID[:12])
        stopErr := client.ContainerStop(context.Background(), container.ID, timeout)
        if stopErr != nil {
            log.Errorf("------- Error Stopping %v : %s", container.Names, stopErr.Error())
            stopErrorStrings = append(stopErrorStrings, " ["+container.ID+"] "+stopErr.Error())
        }

        log.Infof("Console Waiting %v : %s", container.Names, container.ID[:12])
        _, waitErr := client.ContainerWait(context.Background(), container.ID)
        if waitErr != nil {
            log.Errorf("------- Error Waiting %v : %s", container.Names, waitErr.Error())
            waitErrorStrings = append(waitErrorStrings, " ["+container.ID+"] "+waitErr.Error())
        }
    }
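The shutDownContainers change skips the container this command is running in, stops every other container first, and leaves `/console` for last so logging stays available as long as possible. Here is a sketch of just that ordering, without the Docker client plumbing; the `container` struct and the names used are invented for the example.

```go
package main

import (
    "fmt"
    "strings"
)

type container struct {
    ID   string
    Name string
}

// stopOrder returns the order containers would be stopped in: everything
// except the current container first, the console last.
func stopOrder(all []container, selfID string) []container {
    var order []container
    var console *container
    for i := range all {
        c := all[i]
        if c.ID == selfID {
            continue
        }
        if c.Name == "/console" {
            console = &c
            continue
        }
        order = append(order, c)
    }
    if console != nil {
        order = append(order, *console)
    }
    return order
}

func main() {
    order := stopOrder([]container{
        {ID: "aaa", Name: "/console"},
        {ID: "bbb", Name: "/ntp"},
        {ID: "ccc", Name: "/shutdown"}, // pretend this is the current container
    }, "ccc")
    var names []string
    for _, c := range order {
        names = append(names, c.Name)
    }
    fmt.Println(strings.Join(names, " -> ")) // /ntp -> /console
}
```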
@@ -1,48 +1,212 @@
|
||||
package power
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
|
||||
"github.com/codegangsta/cli"
|
||||
"github.com/rancher/os/cmd/control/install"
|
||||
"github.com/rancher/os/config"
|
||||
"github.com/rancher/os/log"
|
||||
)
|
||||
|
||||
func Main() {
|
||||
var (
|
||||
haltFlag bool
|
||||
poweroffFlag bool
|
||||
rebootFlag bool
|
||||
forceFlag bool
|
||||
kexecFlag bool
|
||||
previouskexecFlag bool
|
||||
kexecAppendFlag string
|
||||
)
|
||||
|
||||
func Shutdown() {
|
||||
log.InitLogger()
|
||||
app := cli.NewApp()
|
||||
|
||||
app.Name = os.Args[0]
|
||||
app.Usage = "Control and configure RancherOS"
|
||||
app.Usage = fmt.Sprintf("%s RancherOS\nbuilt: %s", app.Name, config.BuildDate)
|
||||
app.Version = config.Version
|
||||
app.Author = "Rancher Labs, Inc."
|
||||
app.Email = "sid@rancher.com"
|
||||
app.EnableBashCompletion = true
|
||||
app.Action = shutdown
|
||||
app.Flags = []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "r, R",
|
||||
Usage: "reboot after shutdown",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "h",
|
||||
Usage: "halt the system",
|
||||
// --no-wall
|
||||
// Do not send wall message before halt, power-off,
|
||||
// reboot.
|
||||
|
||||
// halt, poweroff, reboot ONLY
|
||||
// -f, --force
|
||||
// Force immediate halt, power-off, reboot. Do not
|
||||
// contact the init system.
|
||||
cli.BoolFlag{
|
||||
Name: "f, force",
|
||||
Usage: "Force immediate halt, power-off, reboot. Do not contact the init system.",
|
||||
Destination: &forceFlag,
|
||||
},
|
||||
|
||||
// -w, --wtmp-only
|
||||
// Only write wtmp shutdown entry, do not actually
|
||||
// halt, power-off, reboot.
|
||||
|
||||
// -d, --no-wtmp
|
||||
// Do not write wtmp shutdown entry.
|
||||
|
||||
// -n, --no-sync
|
||||
// Don't sync hard disks/storage media before halt,
|
||||
// power-off, reboot.
|
||||
|
||||
// shutdown ONLY
|
||||
// -h
|
||||
// Equivalent to --poweroff, unless --halt is
|
||||
// specified.
|
||||
|
||||
// -k
|
||||
// Do not halt, power-off, reboot, just write wall
|
||||
// message.
|
||||
|
||||
// -c
|
||||
// Cancel a pending shutdown. This may be used
|
||||
// to cancel the effect of an invocation of shutdown
|
||||
// with a time argument that is not "+0" or "now".
|
||||
|
||||
}
|
||||
// -H, --halt
|
||||
// Halt the machine.
|
||||
if app.Name == "halt" {
|
||||
app.Flags = append(app.Flags, cli.BoolTFlag{
|
||||
Name: "H, halt",
|
||||
Usage: "halt the machine",
|
||||
Destination: &haltFlag,
|
||||
})
|
||||
} else {
|
||||
app.Flags = append(app.Flags, cli.BoolFlag{
|
||||
Name: "H, halt",
|
||||
Usage: "halt the machine",
|
||||
Destination: &haltFlag,
|
||||
})
|
||||
}
|
||||
// -P, --poweroff
|
||||
// Power-off the machine (the default for shutdown cmd).
|
||||
if app.Name == "poweroff" {
|
||||
app.Flags = append(app.Flags, cli.BoolTFlag{
|
||||
Name: "P, poweroff",
|
||||
Usage: "halt the machine",
|
||||
Destination: &poweroffFlag,
|
||||
})
|
||||
} else {
|
||||
app.Flags = append(app.Flags, cli.BoolFlag{
|
||||
Name: "P, poweroff",
|
||||
Usage: "halt the machine",
|
||||
Destination: &poweroffFlag,
|
||||
})
|
||||
}
|
||||
// -r, --reboot
|
||||
// Reboot the machine.
|
||||
if app.Name == "reboot" {
|
||||
app.Flags = append(app.Flags, cli.BoolTFlag{
|
||||
Name: "r, reboot",
|
||||
Usage: "reboot after shutdown",
|
||||
Destination: &rebootFlag,
|
||||
})
|
||||
// OR? maybe implement it as a `kexec` cli tool?
|
||||
app.Flags = append(app.Flags, cli.BoolFlag{
|
||||
Name: "kexec",
|
||||
Usage: "kexec the default RancherOS cfg",
|
||||
Destination: &kexecFlag,
|
||||
})
|
||||
app.Flags = append(app.Flags, cli.BoolFlag{
|
||||
Name: "kexec-previous",
|
||||
Usage: "kexec the previous RancherOS cfg",
|
||||
Destination: &previouskexecFlag,
|
||||
})
|
||||
app.Flags = append(app.Flags, cli.StringFlag{
|
||||
Name: "kexec-append",
|
||||
Usage: "kexec using the specified kernel boot params (ignores global.cfg)",
|
||||
Destination: &kexecAppendFlag,
|
||||
})
|
||||
} else {
|
||||
app.Flags = append(app.Flags, cli.BoolFlag{
|
||||
Name: "r, reboot",
|
||||
Usage: "reboot after shutdown",
|
||||
Destination: &rebootFlag,
|
||||
})
|
||||
}
|
||||
//TODO: add the time and msg flags...
|
||||
app.HideHelp = true
|
||||
|
||||
app.Run(os.Args)
|
||||
}
|
||||
|
||||
func shutdown(c *cli.Context) error {
|
||||
common("")
|
||||
reboot := c.String("r")
|
||||
poweroff := c.String("h")
|
||||
|
||||
if reboot == "now" {
|
||||
Reboot()
|
||||
} else if poweroff == "now" {
|
||||
Off()
|
||||
func Kexec(previous bool, bootDir, append string) error {
|
||||
cfg := "linux-current.cfg"
|
||||
if previous {
|
||||
cfg = "linux-previous.cfg"
|
||||
}
|
||||
cfgFile := filepath.Join(bootDir, cfg)
|
||||
vmlinuzFile, initrdFile, err := install.ReadSyslinuxCfg(cfgFile)
|
||||
if err != nil {
|
||||
log.Errorf("%s", err)
|
||||
return err
|
||||
}
|
||||
globalCfgFile := filepath.Join(bootDir, "global.cfg")
|
||||
if append == "" {
|
||||
append, err = install.ReadGlobalCfg(globalCfgFile)
|
||||
if err != nil {
|
||||
log.Errorf("%s", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
// TODO: read global.cfg if append == ""
|
||||
// kexec -l ${DIST}/vmlinuz --initrd=${DIST}/initrd --append="${kernelArgs} ${APPEND}" -f
|
||||
cmd := exec.Command(
|
||||
"kexec",
|
||||
"-l", vmlinuzFile,
|
||||
"--initrd", initrdFile,
|
||||
"--append", append,
|
||||
"-f")
|
||||
log.Debugf("Run(%#v)", cmd)
|
||||
cmd.Stderr = os.Stderr
|
||||
if _, err := cmd.Output(); err != nil {
|
||||
log.Errorf("Failed to kexec: %s", err)
|
||||
return err
|
||||
}
|
||||
log.Infof("kexec'd to new install")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Reboot is used by installation / upgrade
|
||||
// TODO: add kexec option
|
||||
func Reboot() {
|
||||
reboot("reboot", false, syscall.LINUX_REBOOT_CMD_RESTART)
|
||||
}
|
||||
|
||||
func shutdown(c *cli.Context) error {
|
||||
// the shutdown command's default is poweroff
|
||||
var powerCmd uint
|
||||
powerCmd = syscall.LINUX_REBOOT_CMD_POWER_OFF
|
||||
if rebootFlag {
|
||||
powerCmd = syscall.LINUX_REBOOT_CMD_RESTART
|
||||
} else if poweroffFlag {
|
||||
powerCmd = syscall.LINUX_REBOOT_CMD_POWER_OFF
|
||||
} else if haltFlag {
|
||||
powerCmd = syscall.LINUX_REBOOT_CMD_HALT
|
||||
}
|
||||
|
||||
timeArg := c.Args().Get(0)
|
||||
if c.App.Name == "shutdown" && timeArg != "" {
|
||||
if timeArg != "now" {
|
||||
err := fmt.Errorf("Sorry, can't parse '%s' as time value (only 'now' supported)", timeArg)
|
||||
log.Error(err)
|
||||
return err
|
||||
}
|
||||
// TODO: if there are more params, LOG them
|
||||
}
|
||||
|
||||
reboot(c.App.Name, forceFlag, powerCmd)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
package respawn

import (
    "fmt"
    "io"
    "io/ioutil"
    "os"
@@ -13,6 +14,7 @@ import (
    "time"

    "github.com/codegangsta/cli"
    "github.com/rancher/os/config"
    "github.com/rancher/os/log"
)

@@ -28,6 +30,11 @@ func Main() {
    runtime.LockOSThread()
    app := cli.NewApp()

    app.Name = os.Args[0]
    app.Usage = fmt.Sprintf("%s RancherOS\nbuilt: %s", app.Name, config.BuildDate)
    app.Version = config.Version
    app.Author = "Rancher Labs, Inc."

    app.Flags = []cli.Flag{
        cli.StringFlag{
            Name: "file, f",
@@ -36,6 +43,9 @@ func Main() {
    }
    app.Action = run

    log.Infof("%s, %s", app.Usage, app.Version)
    fmt.Printf("%s, %s", app.Usage, app.Version)

    app.Run(os.Args)
}

@@ -69,17 +79,21 @@ func run(c *cli.Context) error {
        panic(err)
    }

    var wg sync.WaitGroup
    lines := strings.Split(string(input), "\n")
    doneChannel := make(chan string, len(lines))

    for _, line := range strings.Split(string(input), "\n") {
    for _, line := range lines {
        if strings.TrimSpace(line) == "" || strings.Index(strings.TrimSpace(line), "#") == 0 {
            continue
        }
        wg.Add(1)
        go execute(line, &wg)
        go execute(line, doneChannel)
    }

    wg.Wait()
    for i := 0; i < len(lines); i++ {
        line := <-doneChannel
        log.Infof("FINISHED: %s", line)
        fmt.Printf("FINISHED: %s", line)
    }
    return nil
}

@@ -101,19 +115,20 @@ func termPids() {
    defer processLock.Unlock()

    for _, process := range processes {
        log.Infof("sending SIGTERM to %d", process.Pid)
        process.Signal(syscall.SIGTERM)
    }
}

func execute(line string, wg *sync.WaitGroup) {
    defer wg.Done()
func execute(line string, doneChannel chan string) {
    defer func() { doneChannel <- line }()

    start := time.Now()
    count := 0

    for {
        args := strings.Split(line, " ")
    args := strings.Split(line, " ")

    for {
        cmd := exec.Command(args[0], args[1:]...)
        cmd.Stdout = os.Stdout
        cmd.Stderr = os.Stderr
@@ -144,7 +159,7 @@ func execute(line string, wg *sync.WaitGroup) {
        count++

        if count > 10 {
            if start.Sub(time.Now()) <= (1 * time.Second) {
            if time.Now().Sub(start) <= (1 * time.Second) {
                log.Errorf("%s : restarted too fast, not executing", line)
                break
            }
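The respawn change replaces the `sync.WaitGroup` with a buffered done channel so each supervised line can be reported as it finishes rather than only after all of them return. A standalone sketch of that pattern follows; the names are illustrative, not the project's.

```go
package main

import (
    "fmt"
    "time"
)

// superviseAll starts one goroutine per line and reports each line as it
// finishes, instead of blocking on a WaitGroup until all of them are done.
func superviseAll(lines []string, run func(string)) {
    done := make(chan string, len(lines))
    for _, l := range lines {
        go func(line string) {
            run(line)
            done <- line
        }(l)
    }
    for range lines {
        fmt.Printf("FINISHED: %s\n", <-done)
    }
}

func main() {
    superviseAll([]string{"task-a", "task-b"}, func(line string) {
        time.Sleep(100 * time.Millisecond) // stand-in for running the command line
    })
}
```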
@@ -9,7 +9,8 @@ import (
|
||||
)
|
||||
|
||||
func Main() {
|
||||
log.InitLogger()
|
||||
log.SetLevel(log.DebugLevel)
|
||||
|
||||
if os.Geteuid() != 0 {
|
||||
log.Fatalf("%s: Need to be root", os.Args[0])
|
||||
}
|
||||
@@ -18,5 +19,5 @@ func Main() {
|
||||
os.Setenv("DOCKER_HOST", config.SystemDockerHost)
|
||||
}
|
||||
|
||||
docker.Main()
|
||||
docker.RancherOSMain()
|
||||
}
|
||||
|
||||
@@ -6,10 +6,12 @@ import (
|
||||
"golang.org/x/net/context"
|
||||
|
||||
yaml "github.com/cloudfoundry-incubator/candiedyaml"
|
||||
dockerClient "github.com/docker/engine-api/client"
|
||||
"github.com/docker/libcompose/cli/logger"
|
||||
composeConfig "github.com/docker/libcompose/config"
|
||||
"github.com/docker/libcompose/docker"
|
||||
composeClient "github.com/docker/libcompose/docker/client"
|
||||
|
||||
"github.com/docker/libcompose/project"
|
||||
"github.com/docker/libcompose/project/events"
|
||||
"github.com/docker/libcompose/project/options"
|
||||
@@ -245,13 +247,47 @@ func StageServices(cfg *config.CloudConfig, services ...string) error {
|
||||
}
|
||||
|
||||
// Reduce service configurations to just image and labels
|
||||
needToPull := false
|
||||
var client, userClient, systemClient dockerClient.APIClient
|
||||
for _, serviceName := range p.ServiceConfigs.Keys() {
|
||||
serviceConfig, _ := p.ServiceConfigs.Get(serviceName)
|
||||
|
||||
// test to see if we need to Pull
|
||||
if serviceConfig.Labels[config.ScopeLabel] != config.System {
|
||||
if userClient == nil {
|
||||
userClient, err = rosDocker.NewDefaultClient()
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
}
|
||||
client = userClient
|
||||
} else {
|
||||
if systemClient == nil {
|
||||
systemClient, err = rosDocker.NewSystemClient()
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
client = systemClient
|
||||
}
|
||||
}
|
||||
if client != nil {
|
||||
_, _, err := client.ImageInspectWithRaw(context.Background(), serviceConfig.Image, false)
|
||||
if err == nil {
|
||||
log.Infof("Service %s using local image %s", serviceName, serviceConfig.Image)
|
||||
continue
|
||||
}
|
||||
}
|
||||
needToPull = true
|
||||
|
||||
p.ServiceConfigs.Add(serviceName, &composeConfig.ServiceConfig{
|
||||
Image: serviceConfig.Image,
|
||||
Labels: serviceConfig.Labels,
|
||||
})
|
||||
}
|
||||
|
||||
return p.Pull(context.Background())
|
||||
if needToPull {
|
||||
return p.Pull(context.Background())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
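The StageServices change above only calls `Pull` when at least one requested image is missing locally. Here is a minimal sketch of that decision, with the Docker client hidden behind a tiny interface so nothing depends on the real engine-api signatures; the image names are made up for the demo.

```go
package main

import "fmt"

// imageStore abstracts "is this image already available locally?".
type imageStore interface {
    HasImage(name string) bool
}

// needPull reports whether any requested image is missing locally, so the
// pull step can be skipped entirely when everything is already cached.
func needPull(s imageStore, images []string) bool {
    for _, img := range images {
        if !s.HasImage(img) {
            return true
        }
    }
    return false
}

// fakeStore stands in for the real Docker client in this example.
type fakeStore map[string]bool

func (f fakeStore) HasImage(name string) bool { return f[name] }

func main() {
    local := fakeStore{"example/console:v1": true}
    fmt.Println(needPull(local, []string{"example/console:v1"}))                   // false
    fmt.Println(needPull(local, []string{"example/console:v1", "example/ntp:v1"})) // true
}
```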
|
||||
@@ -15,23 +15,30 @@ import (
|
||||
func LoadService(p *project.Project, cfg *config.CloudConfig, useNetwork bool, service string) error {
|
||||
bytes, err := network.LoadServiceResource(service, useNetwork, cfg)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
return err
|
||||
}
|
||||
|
||||
m := map[interface{}]interface{}{}
|
||||
if err = yaml.Unmarshal(bytes, &m); err != nil {
|
||||
return fmt.Errorf("Failed to parse YAML configuration for %s: %v", service, err)
|
||||
e := fmt.Errorf("Failed to parse YAML configuration for %s: %v", service, err)
|
||||
log.Error(e)
|
||||
return e
|
||||
}
|
||||
|
||||
m = adjustContainerNames(m)
|
||||
|
||||
bytes, err = yaml.Marshal(m)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to marshal YAML configuration for %s: %v", service, err)
|
||||
e := fmt.Errorf("Failed to marshal YAML configuration for %s: %v", service, err)
|
||||
log.Error(e)
|
||||
return e
|
||||
}
|
||||
|
||||
if err = p.Load(bytes); err != nil {
|
||||
return fmt.Errorf("Failed to load %s: %v", service, err)
|
||||
e := fmt.Errorf("Failed to load %s: %v", service, err)
|
||||
log.Error(e)
|
||||
return e
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -85,7 +92,7 @@ func projectReload(p *project.Project, useNetwork *bool, loadConsole bool, envir
|
||||
|
||||
if err := LoadService(p, cfg, *useNetwork, service); err != nil {
|
||||
if err != network.ErrNoNetwork {
|
||||
log.Error(err)
|
||||
log.Errorf("Failed to load service(%s): %v", service, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
@@ -99,12 +106,12 @@ func projectReload(p *project.Project, useNetwork *bool, loadConsole bool, envir
|
||||
|
||||
if loadConsole {
|
||||
if err := loadConsoleService(cfg, p); err != nil {
|
||||
log.Errorf("Failed to load console: %v", err)
|
||||
log.Errorf("Failed to load gancher.console=(%s): %v", cfg.Rancher.Console, err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := loadEngineService(cfg, p); err != nil {
|
||||
log.Errorf("Failed to load engine: %v", err)
|
||||
log.Errorf("Failed to load rancher.docker.engine=(%s): %v", cfg.Rancher.Docker.Engine, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
config/cloudinit/Documentation/cloud-config-deprecated.md (new file, 38 lines)
@@ -0,0 +1,38 @@
|
||||
# Deprecated Cloud-Config Features
|
||||
|
||||
## Retrieving SSH Authorized Keys
|
||||
|
||||
### From a GitHub User
|
||||
|
||||
Using the `coreos-ssh-import-github` field, we can import public SSH keys from a GitHub user to use as authorized keys to a server.
|
||||
|
||||
```yaml
|
||||
#cloud-config
|
||||
|
||||
users:
|
||||
- name: elroy
|
||||
coreos-ssh-import-github: elroy
|
||||
```
|
||||
|
||||
### From an HTTP Endpoint
|
||||
|
||||
We can also pull public SSH keys from any HTTP endpoint which matches [GitHub's API response format](https://developer.github.com/v3/users/keys/#list-public-keys-for-a-user).
|
||||
For example, if you have an installation of GitHub Enterprise, you can provide a complete URL with an authentication token:
|
||||
|
||||
```yaml
|
||||
#cloud-config
|
||||
|
||||
users:
|
||||
- name: elroy
|
||||
coreos-ssh-import-url: https://github-enterprise.example.com/api/v3/users/elroy/keys?access_token=<TOKEN>
|
||||
```
|
||||
|
||||
You can also specify any URL whose response matches the JSON format for public keys:
|
||||
|
||||
```yaml
|
||||
#cloud-config
|
||||
|
||||
users:
|
||||
- name: elroy
|
||||
coreos-ssh-import-url: https://example.com/public-keys
|
||||
```
|
||||
config/cloudinit/Documentation/cloud-config-locations.md (new file, 26 lines)
@@ -0,0 +1,26 @@
|
||||
# Cloud-Config Locations
|
||||
|
||||
On every boot, coreos-cloudinit looks for a config file to configure your host. Here is a list of locations which are used by the Cloud-Config utility, depending on your CoreOS platform:
|
||||
|
||||
| Location | Description |
|
||||
| --- | --- |
|
||||
| `/media/configvirtfs/openstack/latest/user_data` | `/media/configvirtfs` mount point with [config-2](/os/docs/latest/config-drive.html#contents-and-format) label. It should contain a `openstack/latest/user_data` relative path. Usually used by cloud providers or in VM installations. |
|
||||
| `/media/configdrive/openstack/latest/user_data` | FAT or ISO9660 filesystem with [config-2](/os/docs/latest/config-drive.html#qemu-virtfs) label and `/media/configdrive/` mount point. It should also contain a `openstack/latest/user_data` relative path. Usually used in installations which are configured by USB Flash sticks or CDROM media. |
|
||||
| Kernel command line: `cloud-config-url=http://example.com/user_data`. | You can find this string using this command `cat /proc/cmdline`. Usually used in [PXE](/os/docs/latest/booting-with-pxe.html) or [iPXE](/os/docs/latest/booting-with-ipxe.html) boots. |
|
||||
| `/var/lib/coreos-install/user_data` | When you install CoreOS manually using the [coreos-install](/os/docs/latest/installing-to-disk.html) tool. Usually used in bare metal installations. |
|
||||
| `/usr/share/oem/cloud-config.yml` | Path for OEM images. |
|
||||
| `/var/lib/coreos-vagrant/vagrantfile-user-data`| Vagrant OEM scripts automatically store Cloud-Config into this path. |
|
||||
| `/var/lib/waagent/CustomData`| Azure platform uses OEM path for first Cloud-Config initialization and then `/var/lib/waagent/CustomData` to apply your settings. |
|
||||
| `http://169.254.169.254/metadata/v1/user-data` `http://169.254.169.254/2009-04-04/user-data` `https://metadata.packet.net/userdata`|DigitalOcean, EC2 and Packet cloud providers correspondingly use these URLs to download Cloud-Config.|
|
||||
| `/usr/share/oem/bin/vmtoolsd --cmd "info-get guestinfo.coreos.config.data"` | Cloud-Config provided by [VMware Guestinfo][VMware Guestinfo] |
|
||||
| `/usr/share/oem/bin/vmtoolsd --cmd "info-get guestinfo.coreos.config.url"` | Cloud-Config URL provided by [VMware Guestinfo][VMware Guestinfo] |
|
||||
|
||||
[VMware Guestinfo]: vmware-guestinfo.md
|
||||
|
||||
You can also run the `coreos-cloudinit` tool manually and provide a path to your custom Cloud-Config file:
|
||||
|
||||
```sh
|
||||
sudo coreos-cloudinit --from-file=/home/core/cloud-config.yaml
|
||||
```
|
||||
|
||||
This command will apply your custom cloud-config.
|
||||
config/cloudinit/Documentation/cloud-config-oem.md (new file, 37 lines)
@@ -0,0 +1,37 @@
|
||||
## OEM configuration
|
||||
|
||||
The `coreos.oem.*` parameters follow the [os-release spec][os-release], but have been repurposed as a way for coreos-cloudinit to know about the OEM partition on this machine. Customizing this section is only needed when generating a new OEM of CoreOS from the SDK. The fields include:
|
||||
|
||||
- **id**: Lowercase string identifying the OEM
|
||||
- **name**: Human-friendly string representing the OEM
|
||||
- **version-id**: Lowercase string identifying the version of the OEM
|
||||
- **home-url**: Link to the homepage of the provider or OEM
|
||||
- **bug-report-url**: Link to a place to file bug reports about this OEM
|
||||
|
||||
coreos-cloudinit renders these fields to `/etc/oem-release`.
|
||||
If no **id** field is provided, coreos-cloudinit will ignore this section.
|
||||
|
||||
For example, the following cloud-config document...
|
||||
|
||||
```yaml
|
||||
#cloud-config
|
||||
coreos:
|
||||
oem:
|
||||
id: "rackspace"
|
||||
name: "Rackspace Cloud Servers"
|
||||
version-id: "168.0.0"
|
||||
home-url: "https://www.rackspace.com/cloud/servers/"
|
||||
bug-report-url: "https://github.com/coreos/coreos-overlay"
|
||||
```
|
||||
|
||||
...would be rendered to the following `/etc/oem-release`:
|
||||
|
||||
```yaml
|
||||
ID=rackspace
|
||||
NAME="Rackspace Cloud Servers"
|
||||
VERSION_ID=168.0.0
|
||||
HOME_URL="https://www.rackspace.com/cloud/servers/"
|
||||
BUG_REPORT_URL="https://github.com/coreos/coreos-overlay"
|
||||
```
|
||||
|
||||
[os-release]: http://www.freedesktop.org/software/systemd/man/os-release.html
|
||||
config/cloudinit/Documentation/cloud-config.md (new file, 485 lines)
@@ -0,0 +1,485 @@
|
||||
# Using Cloud-Config
|
||||
|
||||
CoreOS allows you to declaratively customize various OS-level items, such as network configuration, user accounts, and systemd units. This document describes the full list of items we can configure. The `coreos-cloudinit` program uses these files as it configures the OS after startup or during runtime.
|
||||
|
||||
Your cloud-config is processed during each boot. Invalid cloud-config won't be processed but will be logged in the journal. You can validate your cloud-config with the [CoreOS online validator](https://coreos.com/validate/) or by running `coreos-cloudinit -validate`. In addition to these two validation methods you can debug `coreos-cloudinit` system output through the `journalctl` tool:
|
||||
|
||||
```sh
|
||||
journalctl --identifier=coreos-cloudinit
|
||||
```
|
||||
|
||||
It will show `coreos-cloudinit` run output which was triggered by system boot.
|
||||
|
||||
## Configuration File
|
||||
|
||||
The file used by this system initialization program is called a "cloud-config" file. It is inspired by the [cloud-init][cloud-init] project's [cloud-config][cloud-config] file, which is "the defacto multi-distribution package that handles early initialization of a cloud instance" ([cloud-init docs][cloud-init-docs]). Because the cloud-init project includes tools which aren't used by CoreOS, only the relevant subset of its configuration items will be implemented in our cloud-config file. In addition to those, we added a few CoreOS-specific items, such as etcd configuration, OEM definition, and systemd units.
|
||||
|
||||
We've designed our implementation to allow the same cloud-config file to work across all of our supported platforms.
|
||||
|
||||
[cloud-init]: https://launchpad.net/cloud-init
|
||||
[cloud-init-docs]: http://cloudinit.readthedocs.org/en/latest/index.html
|
||||
[cloud-config]: http://cloudinit.readthedocs.org/en/latest/topics/format.html#cloud-config-data
|
||||
|
||||
### File Format
|
||||
|
||||
The cloud-config file uses the [YAML][yaml] file format, which uses whitespace and new-lines to delimit lists, associative arrays, and values.
|
||||
|
||||
A cloud-config file must contain a header: either `#cloud-config` for processing as cloud-config (suggested) or `#!` for processing as a shell script (advanced). If the cloud-config file has the `#cloud-config` header, it should be followed by an associative array which has zero or more of the following keys:
|
||||
|
||||
- `coreos`
|
||||
- `ssh_authorized_keys`
|
||||
- `hostname`
|
||||
- `users`
|
||||
- `write_files`
|
||||
- `manage_etc_hosts`
|
||||
|
||||
The expected values for these keys are defined in the rest of this document.
|
||||
|
||||
If the cloud-config header starts with `#!`, then coreos-cloudinit will recognize it as a shell script, which is interpreted by bash and run as a transient systemd service.
|
||||
|
||||
[yaml]: https://en.wikipedia.org/wiki/YAML
|
||||
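As a rough illustration of the header rule above, the sketch below classifies a user-data payload by its first non-blank characters. It is a simplification for illustration only, not the real coreos-cloudinit parser, which does more validation.

```go
package main

import (
    "fmt"
    "strings"
)

// classifyUserData reports how a user-data payload would be treated based on
// its header line.
func classifyUserData(userdata string) string {
    trimmed := strings.TrimLeft(userdata, " \t\r\n")
    switch {
    case strings.HasPrefix(trimmed, "#cloud-config"):
        return "cloud-config"
    case strings.HasPrefix(trimmed, "#!"):
        return "script"
    default:
        return "unrecognized"
    }
}

func main() {
    fmt.Println(classifyUserData("#cloud-config\nhostname: coreos1\n")) // cloud-config
    fmt.Println(classifyUserData("#!/bin/bash\necho hello\n"))          // script
}
```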
|
||||
### Providing Cloud-Config with Config-Drive
|
||||
|
||||
CoreOS tries to conform to each platform's native method to provide user data. Each cloud provider tends to be unique, but this complexity has been abstracted by CoreOS. You can view each platform's instructions on their documentation pages. The most universal way to provide cloud-config is [via config-drive](https://github.com/rancher/os/config/cloudinit/blob/master/Documentation/config-drive.md), which attaches a read-only device containing your cloud-config file to the machine.
|
||||
|
||||
## Configuration Parameters
|
||||
|
||||
### coreos
|
||||
|
||||
#### etcd (deprecated. see etcd2)
|
||||
|
||||
The `coreos.etcd.*` parameters will be translated to a partial systemd unit acting as an etcd configuration file.
|
||||
If the platform environment supports the templating feature of coreos-cloudinit it is possible to automate etcd configuration with the `$private_ipv4` and `$public_ipv4` fields. For example, the following cloud-config document...
|
||||
|
||||
```yaml
|
||||
#cloud-config
|
||||
|
||||
coreos:
|
||||
etcd:
|
||||
name: "node001"
|
||||
# generate a new token for each unique cluster from https://discovery.etcd.io/new
|
||||
discovery: "https://discovery.etcd.io/<token>"
|
||||
# multi-region and multi-cloud deployments need to use $public_ipv4
|
||||
addr: "$public_ipv4:4001"
|
||||
peer-addr: "$private_ipv4:7001"
|
||||
```
|
||||
|
||||
...will generate a systemd unit drop-in for etcd.service with the following contents:
|
||||
|
||||
```yaml
|
||||
[Service]
|
||||
Environment="ETCD_NAME=node001"
|
||||
Environment="ETCD_DISCOVERY=https://discovery.etcd.io/<token>"
|
||||
Environment="ETCD_ADDR=203.0.113.29:4001"
|
||||
Environment="ETCD_PEER_ADDR=192.0.2.13:7001"
|
||||
```
|
||||
|
||||
For more information about the available configuration parameters, see the [etcd documentation][etcd-config].
|
||||
|
||||
_Note: The `$private_ipv4` and `$public_ipv4` substitution variables referenced in other documents are only supported on Amazon EC2, Google Compute Engine, OpenStack, Rackspace, DigitalOcean, and Vagrant._
|
||||
|
||||
[etcd-config]: https://github.com/coreos/etcd/blob/release-0.4/Documentation/configuration.md
|
||||
|
||||
#### etcd2
|
||||
|
||||
The `coreos.etcd2.*` parameters will be translated to a partial systemd unit acting as an etcd configuration file.
|
||||
If the platform environment supports the templating feature of coreos-cloudinit it is possible to automate etcd configuration with the `$private_ipv4` and `$public_ipv4` fields. When generating a [discovery token](https://discovery.etcd.io/new?size=3), set the `size` parameter, since etcd uses this to determine if all members have joined the cluster. After the cluster is bootstrapped, it can grow or shrink from this configured size.
|
||||
|
||||
For example, the following cloud-config document...
|
||||
|
||||
```yaml
|
||||
#cloud-config
|
||||
|
||||
coreos:
|
||||
etcd2:
|
||||
# generate a new token for each unique cluster from https://discovery.etcd.io/new?size=3
|
||||
discovery: "https://discovery.etcd.io/<token>"
|
||||
# multi-region and multi-cloud deployments need to use $public_ipv4
|
||||
advertise-client-urls: "http://$public_ipv4:2379"
|
||||
initial-advertise-peer-urls: "http://$private_ipv4:2380"
|
||||
# listen on both the official ports and the legacy ports
|
||||
# legacy ports can be omitted if your application doesn't depend on them
|
||||
listen-client-urls: "http://0.0.0.0:2379,http://0.0.0.0:4001"
|
||||
listen-peer-urls: "http://$private_ipv4:2380,http://$private_ipv4:7001"
|
||||
```
|
||||
|
||||
...will generate a systemd unit drop-in for etcd2.service with the following contents:
|
||||
|
||||
```yaml
|
||||
[Service]
|
||||
Environment="ETCD_DISCOVERY=https://discovery.etcd.io/<token>"
|
||||
Environment="ETCD_ADVERTISE_CLIENT_URLS=http://203.0.113.29:2379"
|
||||
Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=http://192.0.2.13:2380"
|
||||
Environment="ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379,http://0.0.0.0:4001"
|
||||
Environment="ETCD_LISTEN_PEER_URLS=http://192.0.2.13:2380,http://192.0.2.13:7001"
|
||||
```
|
||||
|
||||
For more information about the available configuration parameters, see the [etcd2 documentation][etcd2-config].
|
||||
|
||||
_Note: The `$private_ipv4` and `$public_ipv4` substitution variables referenced in other documents are only supported on Amazon EC2, Google Compute Engine, OpenStack, Rackspace, DigitalOcean, and Vagrant._
|
||||
|
||||
[etcd2-config]: https://github.com/coreos/etcd/blob/v2.3.2/Documentation/configuration.md
|
||||
|
||||
#### fleet
|
||||
|
||||
The `coreos.fleet.*` parameters work very similarly to `coreos.etcd2.*`, and allow for the configuration of fleet through environment variables. For example, the following cloud-config document...
|
||||
|
||||
```yaml
|
||||
#cloud-config
|
||||
|
||||
coreos:
|
||||
fleet:
|
||||
public-ip: "$public_ipv4"
|
||||
metadata: "region=us-west"
|
||||
```
|
||||
|
||||
...will generate a systemd unit drop-in like this:
|
||||
|
||||
```yaml
|
||||
[Service]
|
||||
Environment="FLEET_PUBLIC_IP=203.0.113.29"
|
||||
Environment="FLEET_METADATA=region=us-west"
|
||||
```
|
||||
|
||||
List of fleet configuration parameters:
|
||||
|
||||
- **agent_ttl**: An Agent will be considered dead if it exceeds this amount of time to communicate with the Registry
|
||||
- **engine_reconcile_interval**: Interval in seconds at which the engine should reconcile the cluster schedule in etcd
|
||||
- **etcd_cafile**: Path to CA file used for TLS communication with etcd
|
||||
- **etcd_certfile**: Provide TLS configuration when SSL certificate authentication is enabled in etcd endpoints
|
||||
- **etcd_keyfile**: Path to private key file used for TLS communication with etcd
|
||||
- **etcd_key_prefix**: etcd prefix path to be used for fleet keys
|
||||
- **etcd_request_timeout**: Amount of time in seconds to allow a single etcd request before considering it failed
|
||||
- **etcd_servers**: Comma separated list of etcd endpoints
|
||||
- **etcd_username**: Username for Basic Authentication to etcd endpoints
|
||||
- **etcd_password**: Password for Basic Authentication to etcd endpoints
|
||||
- **metadata**: Comma separated key/value pairs that are published with the local to the fleet registry
|
||||
- **public_ip**: IP accessible by other nodes for inter-host communication
|
||||
- **verbosity**: Enable debug logging by setting this to an integer value greater than zero
|
||||
|
||||
For more information on fleet configuration, see the [fleet documentation][fleet-config].
|
||||
|
||||
[fleet-config]: https://github.com/coreos/fleet/blob/master/Documentation/deployment-and-configuration.md#configuration
|
||||
|
||||
#### flannel
|
||||
|
||||
The `coreos.flannel.*` parameters also work very similarly to `coreos.etcd2.*`
|
||||
and `coreos.fleet.*`. They can be used to set environment variables for
|
||||
flanneld. For example, the following cloud-config...
|
||||
|
||||
```yaml
|
||||
#cloud-config
|
||||
|
||||
coreos:
|
||||
flannel:
|
||||
etcd_prefix: "/coreos.com/network2"
|
||||
```
|
||||
|
||||
...will generate a systemd unit drop-in like so:
|
||||
|
||||
```
|
||||
[Service]
|
||||
Environment="FLANNELD_ETCD_PREFIX=/coreos.com/network2"
|
||||
```
|
||||
|
||||
List of flannel configuration parameters:
|
||||
|
||||
- **etcd_endpoints**: Comma separated list of etcd endpoints
|
||||
- **etcd_cafile**: Path to CA file used for TLS communication with etcd
|
||||
- **etcd_certfile**: Path to certificate file used for TLS communication with etcd
|
||||
- **etcd_keyfile**: Path to private key file used for TLS communication with etcd
|
||||
- **etcd_prefix**: etcd prefix path to be used for flannel keys
|
||||
- **etcd_username**: Username for Basic Authentication to etcd endpoints
|
||||
- **etcd_password**: Password for Basic Authentication to etcd endpoints
|
||||
- **ip_masq**: Install IP masquerade rules for traffic outside of flannel subnet
|
||||
- **subnet_file**: Path to flannel subnet file to write out
|
||||
- **interface**: Interface (name or IP) that should be used for inter-host communication
|
||||
- **public_ip**: IP accessible by other nodes for inter-host communication
|
||||
|
||||
For more information on flannel configuration, see the [flannel documentation][flannel-readme].
|
||||
|
||||
[flannel-readme]: https://github.com/coreos/flannel/blob/master/README.md
|
||||
|
||||
#### locksmith
|
||||
|
||||
The `coreos.locksmith.*` parameters can be used to set environment variables
|
||||
for locksmith. For example, the following cloud-config...
|
||||
|
||||
```yaml
|
||||
#cloud-config
|
||||
|
||||
coreos:
|
||||
locksmith:
|
||||
endpoint: "http://example.com:2379"
|
||||
```
|
||||
|
||||
...will generate a systemd unit drop-in like so:
|
||||
|
||||
```
|
||||
[Service]
|
||||
Environment="LOCKSMITHD_ENDPOINT=http://example.com:2379"
|
||||
```
|
||||
|
||||
List of locksmith configuration parameters:
|
||||
|
||||
- **endpoint**: Comma separated list of etcd endpoints
|
||||
- **etcd_cafile**: Path to CA file used for TLS communication with etcd
|
||||
- **etcd_certfile**: Path to certificate file used for TLS communication with etcd
|
||||
- **etcd_keyfile**: Path to private key file used for TLS communication with etcd
|
||||
- **group**: Name of the reboot group in which this instance belongs
|
||||
- **window_start**: Start time of the reboot window
|
||||
- **window_length**: Duration of the reboot window
|
||||
- **etcd_username**: Username for Basic Authentication to etcd endpoints
|
||||
- **etcd_password**: Password for Basic Authentication to etcd endpoints
|
||||
|
||||
For the complete list of locksmith configuration parameters, see the [locksmith documentation][locksmith-readme].
|
||||
|
||||
[locksmith-readme]: https://github.com/coreos/locksmith/blob/master/README.md
|
||||
|
||||
#### update
|
||||
|
||||
The `coreos.update.*` parameters manipulate settings related to how CoreOS instances are updated.
|
||||
|
||||
These fields will be written out to and replace `/etc/coreos/update.conf`. If only one of the parameters is given it will only overwrite the given field.
|
||||
The `reboot-strategy` parameter also affects the behaviour of [locksmith](https://github.com/coreos/locksmith).
|
||||
|
||||
- **reboot-strategy**: One of "reboot", "etcd-lock", "best-effort" or "off" for controlling when reboots are issued after an update is performed.
|
||||
- _reboot_: Reboot immediately after an update is applied.
|
||||
- _etcd-lock_: Reboot after first taking a distributed lock in etcd, this guarantees that only one host will reboot concurrently and that the cluster will remain available during the update.
|
||||
- _best-effort_ - If etcd is running, "etcd-lock", otherwise simply "reboot".
|
||||
- _off_ - Disable rebooting after updates are applied (not recommended).
|
||||
- **server**: The location of the [CoreUpdate][coreupdate] server which will be queried for updates. Also known as the [omaha][omaha-docs] server endpoint.
|
||||
- **group**: signifies the channel which should be used for automatic updates. This value defaults to the version of the image initially downloaded. (one of "master", "alpha", "beta", "stable")
|
||||
|
||||
[coreupdate]: https://coreos.com/products/coreupdate
|
||||
[omaha-docs]: https://coreos.com/docs/coreupdate/custom-apps/coreupdate-protocol/
|
||||
|
||||
*Note: cloudinit will only manipulate the locksmith unit file in the systemd runtime directory (`/run/systemd/system/locksmithd.service`). If any manual modifications are made to an overriding unit configuration file (e.g. `/etc/systemd/system/locksmithd.service`), cloudinit will no longer be able to control the locksmith service unit.*
|
||||
|
||||
##### Example
|
||||
|
||||
```yaml
|
||||
#cloud-config
|
||||
coreos:
|
||||
update:
|
||||
reboot-strategy: "etcd-lock"
|
||||
```
|
||||
|
||||
#### units
|
||||
|
||||
The `coreos.units.*` parameters define a list of arbitrary systemd units to start after booting. This feature is intended to help you start essential services required to mount storage and configure networking in order to join the CoreOS cluster. It is not intended to be a Chef/Puppet replacement.
|
||||
|
||||
Each item is an object with the following fields:
|
||||
|
||||
- **name**: String representing unit's name. Required.
|
||||
- **runtime**: Boolean indicating whether or not to persist the unit across reboots. This is analogous to the `--runtime` argument to `systemctl enable`. The default value is false.
|
||||
- **enable**: Boolean indicating whether or not to handle the [Install] section of the unit file. This is similar to running `systemctl enable <name>`. The default value is false.
|
||||
- **content**: Plaintext string representing entire unit file. If no value is provided, the unit is assumed to exist already.
|
||||
- **command**: Command to execute on unit: start, stop, reload, restart, try-restart, reload-or-restart, reload-or-try-restart. The default behavior is to not execute any commands.
|
||||
- **mask**: Whether to mask the unit file by symlinking it to `/dev/null` (analogous to `systemctl mask <name>`). Note that unlike `systemctl mask`, **this will destructively remove any existing unit file** located at `/etc/systemd/system/<unit>`, to ensure that the mask succeeds. The default value is false.
|
||||
- **drop-ins**: A list of unit drop-ins with the following fields:
|
||||
- **name**: String representing unit's name. Required.
|
||||
- **content**: Plaintext string representing entire file. Required.
|
||||
|
||||
|
||||
**NOTE:** The command field is ignored for all network, netdev, and link units. The systemd-networkd.service unit will be restarted in their place.
|
||||
|
||||
##### Examples
|
||||
|
||||
Write a unit to disk, automatically starting it.
|
||||
|
||||
```yaml
|
||||
#cloud-config
|
||||
|
||||
coreos:
|
||||
units:
|
||||
- name: "docker-redis.service"
|
||||
command: "start"
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Redis container
|
||||
Author=Me
|
||||
After=docker.service
|
||||
|
||||
[Service]
|
||||
Restart=always
|
||||
ExecStart=/usr/bin/docker start -a redis_server
|
||||
ExecStop=/usr/bin/docker stop -t 2 redis_server
|
||||
```
|
||||
|
||||
Add the DOCKER_OPTS environment variable to docker.service.
|
||||
|
||||
```yaml
|
||||
#cloud-config
|
||||
|
||||
coreos:
|
||||
units:
|
||||
- name: "docker.service"
|
||||
drop-ins:
|
||||
- name: "50-insecure-registry.conf"
|
||||
content: |
|
||||
[Service]
|
||||
Environment=DOCKER_OPTS='--insecure-registry="10.0.1.0/24"'
|
||||
```
|
||||
|
||||
Start the built-in `etcd2` and `fleet` services:
|
||||
|
||||
```yaml
|
||||
#cloud-config
|
||||
|
||||
coreos:
|
||||
units:
|
||||
- name: "etcd2.service"
|
||||
command: "start"
|
||||
- name: "fleet.service"
|
||||
command: "start"
|
||||
```
|
||||
|
||||
### ssh_authorized_keys
|
||||
|
||||
The `ssh_authorized_keys` parameter adds public SSH keys which will be authorized for the `core` user.
|
||||
|
||||
The keys will be named "coreos-cloudinit" by default.
|
||||
Override this by using the `--ssh-key-name` flag when calling `coreos-cloudinit`.
|
||||
|
||||
```yaml
|
||||
#cloud-config
|
||||
|
||||
ssh_authorized_keys:
|
||||
- "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC0g+ZTxC7weoIJLUafOgrm+h..."
|
||||
```
|
||||
|
||||
### hostname
|
||||
|
||||
The `hostname` parameter defines the system's hostname.
|
||||
This is the local part of a fully-qualified domain name (i.e. `foo` in `foo.example.com`).
|
||||
|
||||
```yaml
|
||||
#cloud-config
|
||||
|
||||
hostname: "coreos1"
|
||||
```
|
||||
|
||||
### users
|
||||
|
||||
The `users` parameter adds or modifies the specified list of users. Each user is an object which consists of the following fields. Each field is optional and of type string unless otherwise noted.
|
||||
All but the `passwd` and `ssh-authorized-keys` fields will be ignored if the user already exists.
|
||||
|
||||
- **name**: Required. Login name of user
|
||||
- **gecos**: GECOS comment of user
|
||||
- **passwd**: Hash of the password to use for this user
|
||||
- **homedir**: User's home directory. Defaults to /home/\<name\>
|
||||
- **no-create-home**: Boolean. Skip home directory creation.
|
||||
- **primary-group**: Default group for the user. Defaults to a new group created named after the user.
|
||||
- **groups**: Add user to these additional groups
|
||||
- **no-user-group**: Boolean. Skip default group creation.
|
||||
- **ssh-authorized-keys**: List of public SSH keys to authorize for this user
|
||||
- **coreos-ssh-import-github** [DEPRECATED]: Authorize SSH keys from GitHub user
|
||||
- **coreos-ssh-import-github-users** [DEPRECATED]: Authorize SSH keys from a list of GitHub users
|
||||
- **coreos-ssh-import-url** [DEPRECATED]: Authorize SSH keys imported from a url endpoint.
|
||||
- **system**: Create the user as a system user. No home directory will be created.
|
||||
- **no-log-init**: Boolean. Skip initialization of lastlog and faillog databases.
|
||||
- **shell**: User's login shell.
|
||||
|
||||
The following fields are not yet implemented:
|
||||
|
||||
- **inactive**: Deactivate the user upon creation
|
||||
- **lock-passwd**: Boolean. Disable password login for user
|
||||
- **sudo**: Entry to add to /etc/sudoers for user. By default, no sudo access is authorized.
|
||||
- **selinux-user**: Corresponding SELinux user
|
||||
- **ssh-import-id**: Import SSH keys by ID from Launchpad.
|
||||
|
||||
```yaml
|
||||
#cloud-config
|
||||
|
||||
users:
|
||||
- name: "elroy"
|
||||
passwd: "$6$5s2u6/jR$un0AvWnqilcgaNB3Mkxd5yYv6mTlWfOoCYHZmfi3LDKVltj.E8XNKEcwWm..."
|
||||
groups:
|
||||
- "sudo"
|
||||
- "docker"
|
||||
ssh-authorized-keys:
|
||||
- "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC0g+ZTxC7weoIJLUafOgrm+h..."
|
||||
```
|
||||
|
||||
#### Generating a password hash
|
||||
|
||||
If you choose to use a password instead of an SSH key, generating a safe hash is extremely important to the security of your system. Simplified hashes like md5crypt are trivial to crack on modern GPU hardware. Here are a few ways to generate secure hashes:
|
||||
|
||||
```
|
||||
# On Debian/Ubuntu (via the package "whois")
|
||||
mkpasswd --method=SHA-512 --rounds=4096
|
||||
|
||||
# OpenSSL (note: this will only make md5crypt. While better than plaintext, it should not be considered fully secure)
|
||||
openssl passwd -1
|
||||
|
||||
# Python (change password and salt values)
|
||||
python -c "import crypt, getpass, pwd; print crypt.crypt('password', '\$6\$SALT\$')"
|
||||
|
||||
# Perl (change password and salt values)
|
||||
perl -e 'print crypt("password","\$6\$SALT\$") . "\n"'
|
||||
```
|
||||
|
||||
Using a higher number of rounds will help create more secure passwords, but given enough time, password hashes can be reversed. On most RPM based distributions there is a tool called mkpasswd available in the `expect` package, but this does not handle "rounds" nor advanced hashing algorithms.
|
||||
|
||||
### write_files
|
||||
|
||||
The `write_files` directive defines a set of files to create on the local filesystem.
|
||||
Each item in the list may have the following keys:
|
||||
|
||||
- **path**: Absolute location on disk where contents should be written
|
||||
- **content**: Data to write at the provided `path`
|
||||
- **permissions**: Integer representing file permissions, typically in octal notation (i.e. 0644)
|
||||
- **owner**: User and group that should own the file written to disk. This is equivalent to the `<user>:<group>` argument to `chown <user>:<group> <path>`.
|
||||
- **encoding**: Optional. The encoding of the data in content. If not specified this defaults to the yaml document encoding (usually utf-8). Supported encoding types are:
|
||||
- **b64, base64**: Base64 encoded content
|
||||
- **gz, gzip**: gzip encoded content, for use with the !!binary tag
|
||||
- **gz+b64, gz+base64, gzip+b64, gzip+base64**: Base64 encoded gzip content
|
||||
|
||||
|
||||
```yaml
|
||||
#cloud-config
|
||||
write_files:
|
||||
- path: "/etc/resolv.conf"
|
||||
permissions: "0644"
|
||||
owner: "root"
|
||||
content: |
|
||||
nameserver 8.8.8.8
|
||||
- path: "/etc/motd"
|
||||
permissions: "0644"
|
||||
owner: "root"
|
||||
content: |
|
||||
Good news, everyone!
|
||||
- path: "/tmp/like_this"
|
||||
permissions: "0644"
|
||||
owner: "root"
|
||||
encoding: "gzip"
|
||||
content: !!binary |
|
||||
H4sIAKgdh1QAAwtITM5WyK1USMqvUCjPLMlQSMssS1VIya9KzVPIySwszS9SyCpNLwYARQFQ5CcAAAA=
|
||||
- path: "/tmp/or_like_this"
|
||||
permissions: "0644"
|
||||
owner: "root"
|
||||
encoding: "gzip+base64"
|
||||
content: |
|
||||
H4sIAKgdh1QAAwtITM5WyK1USMqvUCjPLMlQSMssS1VIya9KzVPIySwszS9SyCpNLwYARQFQ5CcAAAA=
|
||||
- path: "/tmp/todolist"
|
||||
permissions: "0644"
|
||||
owner: "root"
|
||||
encoding: "base64"
|
||||
content: |
|
||||
UGFjayBteSBib3ggd2l0aCBmaXZlIGRvemVuIGxpcXVvciBqdWdz
|
||||
```
|
||||
|
||||
### manage_etc_hosts
|
||||
|
||||
The `manage_etc_hosts` parameter configures the contents of the `/etc/hosts` file, which is used for local name resolution.
|
||||
Currently, the only supported value is "localhost" which will cause your system's hostname
|
||||
to resolve to "127.0.0.1". This is helpful when the host does not have DNS
|
||||
infrastructure in place to resolve its own hostname, for example, when using Vagrant.
|
||||
|
||||
```yaml
|
||||
#cloud-config
|
||||
|
||||
manage_etc_hosts: "localhost"
|
||||
```
|
||||
config/cloudinit/Documentation/config-drive.md (new file, 40 lines)
@@ -0,0 +1,40 @@
|
||||
# Distribution via Config Drive
|
||||
|
||||
CoreOS supports providing configuration data via [config drive][config-drive]
|
||||
disk images. Currently only providing a single script or cloud config file is
|
||||
supported.
|
||||
|
||||
[config-drive]: http://docs.openstack.org/user-guide/cli_config_drive.html
|
||||
|
||||
## Contents and Format
|
||||
|
||||
The image should be a single FAT or ISO9660 file system with the label
|
||||
`config-2` and the configuration data should be located at
|
||||
`openstack/latest/user_data`.
|
||||
|
||||
For example, to wrap up a config named `user_data` in a config drive image:
|
||||
|
||||
```sh
|
||||
mkdir -p /tmp/new-drive/openstack/latest
|
||||
cp user_data /tmp/new-drive/openstack/latest/user_data
|
||||
mkisofs -R -V config-2 -o configdrive.iso /tmp/new-drive
|
||||
rm -r /tmp/new-drive
|
||||
```
|
||||
|
||||
If on OS X, replace the `mkisofs` invocation with:
|
||||
|
||||
```sh
|
||||
hdiutil makehybrid -iso -joliet -default-volume-name config-2 -o configdrive.iso /tmp/new-drive
|
||||
```
|
||||
|
||||
## QEMU virtfs
|
||||
|
||||
One exception to the above, when using QEMU it is possible to skip creating an
|
||||
image and use a plain directory containing the same contents:
|
||||
|
||||
```sh
|
||||
qemu-system-x86_64 \
|
||||
-fsdev local,id=conf,security_model=none,readonly,path=/tmp/new-drive \
|
||||
-device virtio-9p-pci,fsdev=conf,mount_tag=config-2 \
|
||||
[usual qemu options here...]
|
||||
```
|
||||
config/cloudinit/Documentation/debian-interfaces.md (new file, 27 lines)
@@ -0,0 +1,27 @@
# Debian Interfaces

**WARNING**: This option is EXPERIMENTAL and may change or be removed at any
point.

There is basic support for converting from a Debian network configuration to
networkd unit files. The -convert-netconf=debian option is used to activate
this feature.

## convert-netconf

Default: ""

Read the network config provided in cloud-drive and translate it from the
specified format into networkd unit files (requires the -from-configdrive
flag). Currently only supports "debian" which provides support for a small
subset of the [Debian network configuration](https://wiki.debian.org/NetworkConfiguration). These options include:

- interface config methods
  - static
    - address/netmask
    - gateway
    - hwaddress
    - dns-nameservers
  - dhcp
    - hwaddress
  - manual
  - loopback
- vlan_raw_device
- bond-slaves
config/cloudinit/Documentation/vmware-guestinfo.md (new file, 36 lines)
@@ -0,0 +1,36 @@
|
||||
# VMWare Guestinfo Interface
|
||||
|
||||
## Cloud-Config VMWare Guestinfo Variables
|
||||
|
||||
coreos-cloudinit accepts configuration from the VMware RPC API's *guestinfo*
|
||||
facility. This datasource can be enabled with the `--from-vmware-guestinfo`
|
||||
flag to coreos-cloudinit.
|
||||
|
||||
The following guestinfo variables are recognized and processed by cloudinit
|
||||
when passed from the hypervisor to the virtual machine at boot time. Note that
|
||||
property names are prefixed with `guestinfo.` in the VMX, e.g., `guestinfo.hostname`.
|
||||
|
||||
| guestinfo variable | type |
|
||||
|:--------------------------------------|:--------------------------------|
|
||||
| `hostname` | `hostname` |
|
||||
| `interface.<n>.name` | `string` |
|
||||
| `interface.<n>.mac` | `MAC address` |
|
||||
| `interface.<n>.dhcp` | `{"yes", "no"}` |
|
||||
| `interface.<n>.role` | `{"public", "private"}` |
|
||||
| `interface.<n>.ip.<m>.address` | `CIDR IP address` |
|
||||
| `interface.<n>.route.<l>.gateway` | `IP address` |
|
||||
| `interface.<n>.route.<l>.destination` | `CIDR IP address` |
|
||||
| `dns.server.<x>` | `IP address` |
|
||||
| `dns.domain.<y>` | `DNS search domain` |
|
||||
| `coreos.config.data` | `string` |
|
||||
| `coreos.config.data.encoding` | `{"", "base64", "gzip+base64"}` |
|
||||
| `coreos.config.url` | `URL` |
|
||||
|
||||
Note: "n", "m", "l", "x" and "y" are 0-indexed, incrementing integers. The
|
||||
identifier for an `interface` does not correspond to anything outside of this
|
||||
configuration; it serves only to distinguish between multiple `interface`s.
|
||||
|
||||
The guide to [booting on VMWare][bootvmware] is the starting point for more
|
||||
information about configuring and running CoreOS on VMWare.
|
||||
|
||||
[bootvmware]: https://github.com/coreos/docs/blob/master/os/booting-on-vmware.md
|
||||
@@ -1,3 +1,7 @@
|
||||
**NOTE**: This project has been superseded by [Ignition][ignition] and is no longer under active development. Please direct all development efforts to Ignition.
|
||||
|
||||
[ignition]: https://github.com/coreos/ignition
|
||||
|
||||
# coreos-cloudinit [](https://travis-ci.org/coreos/coreos-cloudinit)
|
||||
|
||||
coreos-cloudinit enables a user to customize CoreOS machines by providing either a cloud-config document or an executable script through user-data.
|
||||
@@ -9,8 +13,8 @@ Additionally, several [CoreOS-specific options][custom-cloud-config] have been i
|
||||
All supported cloud-config parameters are [documented here][all-cloud-config].
|
||||
|
||||
[official-cloud-config]: http://cloudinit.readthedocs.org/en/latest/topics/format.html#cloud-config-data
|
||||
[custom-cloud-config]: https://github.com/coreos/coreos-cloudinit/blob/master/Documentation/cloud-config.md#coreos-parameters
|
||||
[all-cloud-config]: https://github.com/coreos/coreos-cloudinit/tree/master/Documentation/cloud-config.md
|
||||
[custom-cloud-config]: https://github.com/rancher/os/config/cloudinit/blob/master/Documentation/cloud-config.md#coreos-parameters
|
||||
[all-cloud-config]: https://github.com/rancher/os/config/cloudinit/tree/master/Documentation/cloud-config.md
|
||||
|
||||
The following is an example cloud-config document:
|
||||
|
||||
@@ -4,7 +4,7 @@ NAME="coreos-cloudinit"
|
||||
ORG_PATH="github.com/coreos"
|
||||
REPO_PATH="${ORG_PATH}/${NAME}"
|
||||
VERSION=$(git describe --dirty --tags)
|
||||
GLDFLAGS="-X main.version \"${VERSION}\""
|
||||
GLDFLAGS="-X main.version=\"${VERSION}\""
|
||||
|
||||
if [ ! -h gopath/src/${REPO_PATH} ]; then
|
||||
mkdir -p gopath/src/${ORG_PATH}
|
||||
config/cloudinit/config/config_test.go (new file, 547 lines)
@@ -0,0 +1,547 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestNewCloudConfig(t *testing.T) {
|
||||
tests := []struct {
|
||||
contents string
|
||||
|
||||
config CloudConfig
|
||||
}{
|
||||
{},
|
||||
{
|
||||
contents: "#cloud-config\nwrite_files:\n - path: underscore",
|
||||
config: CloudConfig{WriteFiles: []File{{Path: "underscore"}}},
|
||||
},
|
||||
{
|
||||
contents: "#cloud-config\nwrite-files:\n - path: hyphen",
|
||||
config: CloudConfig{WriteFiles: []File{{Path: "hyphen"}}},
|
||||
},
|
||||
{
|
||||
contents: "#cloud-config\ncoreos:\n update:\n reboot-strategy: off",
|
||||
config: CloudConfig{CoreOS: CoreOS{Update: Update{RebootStrategy: "off"}}},
|
||||
},
|
||||
{
|
||||
contents: "#cloud-config\ncoreos:\n update:\n reboot-strategy: false",
|
||||
config: CloudConfig{CoreOS: CoreOS{Update: Update{RebootStrategy: "false"}}},
|
||||
},
|
||||
{
|
||||
contents: "#cloud-config\nwrite_files:\n - permissions: 0744",
|
||||
config: CloudConfig{WriteFiles: []File{{RawFilePermissions: "0744"}}},
|
||||
},
|
||||
{
|
||||
contents: "#cloud-config\nwrite_files:\n - permissions: 744",
|
||||
config: CloudConfig{WriteFiles: []File{{RawFilePermissions: "744"}}},
|
||||
},
|
||||
{
|
||||
contents: "#cloud-config\nwrite_files:\n - permissions: '0744'",
|
||||
config: CloudConfig{WriteFiles: []File{{RawFilePermissions: "0744"}}},
|
||||
},
|
||||
{
|
||||
contents: "#cloud-config\nwrite_files:\n - permissions: '744'",
|
||||
config: CloudConfig{WriteFiles: []File{{RawFilePermissions: "744"}}},
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
config, err := NewCloudConfig(tt.contents)
|
||||
if err != nil {
|
||||
t.Errorf("bad error (test case #%d): want %v, got %s", i, nil, err)
|
||||
}
|
||||
if !reflect.DeepEqual(&tt.config, config) {
|
||||
t.Errorf("bad config (test case #%d): want %#v, got %#v", i, tt.config, config)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewCloudConfigDecode(t *testing.T) {
|
||||
// all of these decode to "bar"
|
||||
contentTests := map[string]string{
|
||||
"base64": "YmFy",
|
||||
"b64": "YmFy",
|
||||
// theoretically gz+gzip are supported but they break yaml
|
||||
// "gz": "\x1f\x8b\x08\x08w\x14\x87T\x02\xffok\x00KJ,\x02\x00\xaa\x8c\xffv\x03\x00\x00\x00",
|
||||
// "gzip": "\x1f\x8b\x08\x08w\x14\x87T\x02\xffok\x00KJ,\x02\x00\xaa\x8c\xffv\x03\x00\x00\x00",
|
||||
"gz+base64": "H4sIABMVh1QAA0tKLAIAqoz/dgMAAAA=",
|
||||
"gzip+base64": "H4sIABMVh1QAA0tKLAIAqoz/dgMAAAA=",
|
||||
"gz+b64": "H4sIABMVh1QAA0tKLAIAqoz/dgMAAAA=",
|
||||
"gzip+b64": "H4sIABMVh1QAA0tKLAIAqoz/dgMAAAA=",
|
||||
}
|
||||
|
||||
type testCase struct {
|
||||
contents string
|
||||
config CloudConfig
|
||||
}
|
||||
|
||||
var decodingTests []testCase
|
||||
for name, content := range contentTests {
|
||||
decodingTests = append(decodingTests, testCase{
|
||||
contents: fmt.Sprintf("#cloud-config\nwrite_files:\n - encoding: %q\n content: |\n %s", name, content),
|
||||
config: CloudConfig{WriteFiles: []File{{Content: "bar"}}},
|
||||
})
|
||||
}
|
||||
|
||||
for i, tt := range decodingTests {
|
||||
config, err := NewCloudConfig(tt.contents)
|
||||
if err != nil {
|
||||
t.Errorf("bad error (test case #%d): want %v, got %s", i, nil, err)
|
||||
}
|
||||
|
||||
if err := config.Decode(); err != nil {
|
||||
t.Errorf("bad error (test case #%d): want %v, got %s", i, nil, err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(&tt.config, config) {
|
||||
t.Errorf("bad config (test case #%d): want %#v, got %#v", i, tt.config, config)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestIsZero(t *testing.T) {
|
||||
tests := []struct {
|
||||
c interface{}
|
||||
|
||||
empty bool
|
||||
}{
|
||||
{struct{}{}, true},
|
||||
{struct{ a, b string }{}, true},
|
||||
{struct{ A, b string }{}, true},
|
||||
{struct{ A, B string }{}, true},
|
||||
{struct{ A string }{A: "hello"}, false},
|
||||
{struct{ A int }{}, true},
|
||||
{struct{ A int }{A: 1}, false},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
if empty := IsZero(tt.c); tt.empty != empty {
|
||||
t.Errorf("bad result (%q): want %t, got %t", tt.c, tt.empty, empty)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAssertStructValid(t *testing.T) {
|
||||
tests := []struct {
|
||||
c interface{}
|
||||
|
||||
err error
|
||||
}{
|
||||
{struct{}{}, nil},
|
||||
{struct {
|
||||
A, b string `valid:"^1|2$"`
|
||||
}{}, nil},
|
||||
{struct {
|
||||
A, b string `valid:"^1|2$"`
|
||||
}{A: "1", b: "2"}, nil},
|
||||
{struct {
|
||||
A, b string `valid:"^1|2$"`
|
||||
}{A: "1", b: "hello"}, nil},
|
||||
{struct {
|
||||
A, b string `valid:"^1|2$"`
|
||||
}{A: "hello", b: "2"}, &ErrorValid{Value: "hello", Field: "A", Valid: "^1|2$"}},
|
||||
{struct {
|
||||
A, b int `valid:"^1|2$"`
|
||||
}{}, nil},
|
||||
{struct {
|
||||
A, b int `valid:"^1|2$"`
|
||||
}{A: 1, b: 2}, nil},
|
||||
{struct {
|
||||
A, b int `valid:"^1|2$"`
|
||||
}{A: 1, b: 9}, nil},
|
||||
{struct {
|
||||
A, b int `valid:"^1|2$"`
|
||||
}{A: 9, b: 2}, &ErrorValid{Value: "9", Field: "A", Valid: "^1|2$"}},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
if err := AssertStructValid(tt.c); !reflect.DeepEqual(tt.err, err) {
|
||||
t.Errorf("bad result (%q): want %q, got %q", tt.c, tt.err, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigCompile(t *testing.T) {
|
||||
tests := []interface{}{
|
||||
Etcd{},
|
||||
File{},
|
||||
Flannel{},
|
||||
Fleet{},
|
||||
Locksmith{},
|
||||
OEM{},
|
||||
Unit{},
|
||||
Update{},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
ttt := reflect.TypeOf(tt)
|
||||
for i := 0; i < ttt.NumField(); i++ {
|
||||
ft := ttt.Field(i)
|
||||
if !isFieldExported(ft) {
|
||||
continue
|
||||
}
|
||||
|
||||
if _, err := regexp.Compile(ft.Tag.Get("valid")); err != nil {
|
||||
t.Errorf("bad regexp(%s.%s): want %v, got %s", ttt.Name(), ft.Name, nil, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCloudConfigUnknownKeys(t *testing.T) {
|
||||
contents := `
|
||||
coreos:
|
||||
etcd:
|
||||
discovery: "https://discovery.etcd.io/827c73219eeb2fa5530027c37bf18877"
|
||||
coreos_unknown:
|
||||
foo: "bar"
|
||||
section_unknown:
|
||||
dunno:
|
||||
something
|
||||
bare_unknown:
|
||||
bar
|
||||
write_files:
|
||||
- content: fun
|
||||
path: /var/party
|
||||
file_unknown: nofun
|
||||
users:
|
||||
- name: fry
|
||||
passwd: somehash
|
||||
user_unknown: philip
|
||||
hostname:
|
||||
foo
|
||||
`
|
||||
cfg, err := NewCloudConfig(contents)
|
||||
if err != nil {
|
||||
t.Fatalf("error instantiating CloudConfig with unknown keys: %v", err)
|
||||
}
|
||||
if cfg.Hostname != "foo" {
|
||||
t.Fatalf("hostname not correctly set when invalid keys are present")
|
||||
}
|
||||
if cfg.CoreOS.Etcd.Discovery != "https://discovery.etcd.io/827c73219eeb2fa5530027c37bf18877" {
|
||||
t.Fatalf("etcd section not correctly set when invalid keys are present")
|
||||
}
|
||||
if len(cfg.WriteFiles) < 1 || cfg.WriteFiles[0].Content != "fun" || cfg.WriteFiles[0].Path != "/var/party" {
|
||||
t.Fatalf("write_files section not correctly set when invalid keys are present")
|
||||
}
|
||||
if len(cfg.Users) < 1 || cfg.Users[0].Name != "fry" || cfg.Users[0].PasswordHash != "somehash" {
|
||||
t.Fatalf("users section not correctly set when invalid keys are present")
|
||||
}
|
||||
}
|
||||
|
||||
// Assert that the parsing of a cloud config file "generally works"
|
||||
func TestCloudConfigEmpty(t *testing.T) {
|
||||
cfg, err := NewCloudConfig("")
|
||||
if err != nil {
|
||||
t.Fatalf("Encountered unexpected error :%v", err)
|
||||
}
|
||||
|
||||
keys := cfg.SSHAuthorizedKeys
|
||||
if len(keys) != 0 {
|
||||
t.Error("Parsed incorrect number of SSH keys")
|
||||
}
|
||||
|
||||
if len(cfg.WriteFiles) != 0 {
|
||||
t.Error("Expected zero WriteFiles")
|
||||
}
|
||||
|
||||
if cfg.Hostname != "" {
|
||||
t.Errorf("Expected hostname to be empty, got '%s'", cfg.Hostname)
|
||||
}
|
||||
}
|
||||
|
||||
// Assert that the parsing of a cloud config file "generally works"
|
||||
func TestCloudConfig(t *testing.T) {
|
||||
contents := `
|
||||
coreos:
|
||||
etcd:
|
||||
discovery: "https://discovery.etcd.io/827c73219eeb2fa5530027c37bf18877"
|
||||
update:
|
||||
reboot_strategy: reboot
|
||||
units:
|
||||
- name: 50-eth0.network
|
||||
runtime: yes
|
||||
content: '[Match]
|
||||
|
||||
Name=eth47
|
||||
|
||||
|
||||
[Network]
|
||||
|
||||
Address=10.209.171.177/19
|
||||
|
||||
'
|
||||
oem:
|
||||
id: rackspace
|
||||
name: Rackspace Cloud Servers
|
||||
version_id: 168.0.0
|
||||
home_url: https://www.rackspace.com/cloud/servers/
|
||||
bug_report_url: https://github.com/coreos/coreos-overlay
|
||||
ssh_authorized_keys:
|
||||
- foobar
|
||||
- foobaz
|
||||
write_files:
|
||||
- content: |
|
||||
penny
|
||||
elroy
|
||||
path: /etc/dogepack.conf
|
||||
permissions: '0644'
|
||||
owner: root:dogepack
|
||||
hostname: trontastic
|
||||
`
|
||||
cfg, err := NewCloudConfig(contents)
|
||||
if err != nil {
|
||||
t.Fatalf("Encountered unexpected error :%v", err)
|
||||
}
|
||||
|
||||
keys := cfg.SSHAuthorizedKeys
|
||||
if len(keys) != 2 {
|
||||
t.Error("Parsed incorrect number of SSH keys")
|
||||
} else if keys[0] != "foobar" {
|
||||
t.Error("Expected first SSH key to be 'foobar'")
|
||||
} else if keys[1] != "foobaz" {
|
||||
t.Error("Expected first SSH key to be 'foobaz'")
|
||||
}
|
||||
|
||||
if len(cfg.WriteFiles) != 1 {
|
||||
t.Error("Failed to parse correct number of write_files")
|
||||
} else {
|
||||
wf := cfg.WriteFiles[0]
|
||||
if wf.Content != "penny\nelroy\n" {
|
||||
t.Errorf("WriteFile has incorrect contents '%s'", wf.Content)
|
||||
}
|
||||
if wf.Encoding != "" {
|
||||
t.Errorf("WriteFile has incorrect encoding %s", wf.Encoding)
|
||||
}
|
||||
if wf.RawFilePermissions != "0644" {
|
||||
t.Errorf("WriteFile has incorrect permissions %s", wf.RawFilePermissions)
|
||||
}
|
||||
if wf.Path != "/etc/dogepack.conf" {
|
||||
t.Errorf("WriteFile has incorrect path %s", wf.Path)
|
||||
}
|
||||
if wf.Owner != "root:dogepack" {
|
||||
t.Errorf("WriteFile has incorrect owner %s", wf.Owner)
|
||||
}
|
||||
}
|
||||
|
||||
if len(cfg.CoreOS.Units) != 1 {
|
||||
t.Error("Failed to parse correct number of units")
|
||||
} else {
|
||||
u := cfg.CoreOS.Units[0]
|
||||
expect := `[Match]
|
||||
Name=eth47
|
||||
|
||||
[Network]
|
||||
Address=10.209.171.177/19
|
||||
`
|
||||
if u.Content != expect {
|
||||
t.Errorf("Unit has incorrect contents '%s'.\nExpected '%s'.", u.Content, expect)
|
||||
}
|
||||
if u.Runtime != true {
|
||||
t.Errorf("Unit has incorrect runtime value")
|
||||
}
|
||||
if u.Name != "50-eth0.network" {
|
||||
t.Errorf("Unit has incorrect name %s", u.Name)
|
||||
}
|
||||
}
|
||||
|
||||
if cfg.CoreOS.OEM.ID != "rackspace" {
|
||||
t.Errorf("Failed parsing coreos.oem. Expected ID 'rackspace', got %q.", cfg.CoreOS.OEM.ID)
|
||||
}
|
||||
|
||||
if cfg.Hostname != "trontastic" {
|
||||
t.Errorf("Failed to parse hostname")
|
||||
}
|
||||
if cfg.CoreOS.Update.RebootStrategy != "reboot" {
|
||||
t.Errorf("Failed to parse locksmith strategy")
|
||||
}
|
||||
}
|
||||
|
||||
// Assert that our interface conversion doesn't panic
|
||||
func TestCloudConfigKeysNotList(t *testing.T) {
|
||||
contents := `
|
||||
ssh_authorized_keys:
|
||||
- foo: bar
|
||||
`
|
||||
cfg, err := NewCloudConfig(contents)
|
||||
if err != nil {
|
||||
t.Fatalf("Encountered unexpected error: %v", err)
|
||||
}
|
||||
|
||||
keys := cfg.SSHAuthorizedKeys
|
||||
if len(keys) != 0 {
|
||||
t.Error("Parsed incorrect number of SSH keys")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCloudConfigSerializationHeader(t *testing.T) {
|
||||
cfg, _ := NewCloudConfig("")
|
||||
contents := cfg.String()
|
||||
header := strings.SplitN(contents, "\n", 2)[0]
|
||||
if header != "#cloud-config" {
|
||||
t.Fatalf("Serialized config did not have expected header")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCloudConfigUsers(t *testing.T) {
|
||||
contents := `
|
||||
users:
|
||||
- name: elroy
|
||||
passwd: somehash
|
||||
ssh_authorized_keys:
|
||||
- somekey
|
||||
gecos: arbitrary comment
|
||||
homedir: /home/place
|
||||
no_create_home: yes
|
||||
primary_group: things
|
||||
groups:
|
||||
- ping
|
||||
- pong
|
||||
no_user_group: true
|
||||
system: y
|
||||
no_log_init: True
|
||||
shell: /bin/sh
|
||||
`
|
||||
cfg, err := NewCloudConfig(contents)
|
||||
if err != nil {
|
||||
t.Fatalf("Encountered unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if len(cfg.Users) != 1 {
|
||||
t.Fatalf("Parsed %d users, expected 1", len(cfg.Users))
|
||||
}
|
||||
|
||||
user := cfg.Users[0]
|
||||
|
||||
if user.Name != "elroy" {
|
||||
t.Errorf("User name is %q, expected 'elroy'", user.Name)
|
||||
}
|
||||
|
||||
if user.PasswordHash != "somehash" {
|
||||
t.Errorf("User passwd is %q, expected 'somehash'", user.PasswordHash)
|
||||
}
|
||||
|
||||
if keys := user.SSHAuthorizedKeys; len(keys) != 1 {
|
||||
t.Errorf("Parsed %d ssh keys, expected 1", len(keys))
|
||||
} else {
|
||||
key := user.SSHAuthorizedKeys[0]
|
||||
if key != "somekey" {
|
||||
t.Errorf("User SSH key is %q, expected 'somekey'", key)
|
||||
}
|
||||
}
|
||||
|
||||
if user.GECOS != "arbitrary comment" {
|
||||
t.Errorf("Failed to parse gecos field, got %q", user.GECOS)
|
||||
}
|
||||
|
||||
if user.Homedir != "/home/place" {
|
||||
t.Errorf("Failed to parse homedir field, got %q", user.Homedir)
|
||||
}
|
||||
|
||||
if !user.NoCreateHome {
|
||||
t.Errorf("Failed to parse no_create_home field")
|
||||
}
|
||||
|
||||
if user.PrimaryGroup != "things" {
|
||||
t.Errorf("Failed to parse primary_group field, got %q", user.PrimaryGroup)
|
||||
}
|
||||
|
||||
if len(user.Groups) != 2 {
|
||||
t.Errorf("Failed to parse 2 goups, got %d", len(user.Groups))
|
||||
} else {
|
||||
if user.Groups[0] != "ping" {
|
||||
t.Errorf("First group was %q, not expected value 'ping'", user.Groups[0])
|
||||
}
|
||||
if user.Groups[1] != "pong" {
|
||||
t.Errorf("First group was %q, not expected value 'pong'", user.Groups[1])
|
||||
}
|
||||
}
|
||||
|
||||
if !user.NoUserGroup {
|
||||
t.Errorf("Failed to parse no_user_group field")
|
||||
}
|
||||
|
||||
if !user.System {
|
||||
t.Errorf("Failed to parse system field")
|
||||
}
|
||||
|
||||
if !user.NoLogInit {
|
||||
t.Errorf("Failed to parse no_log_init field")
|
||||
}
|
||||
|
||||
if user.Shell != "/bin/sh" {
|
||||
t.Errorf("Failed to parse shell field, got %q", user.Shell)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCloudConfigUsersGithubUser(t *testing.T) {
|
||||
|
||||
contents := `
|
||||
users:
|
||||
- name: elroy
|
||||
coreos_ssh_import_github: bcwaldon
|
||||
`
|
||||
cfg, err := NewCloudConfig(contents)
|
||||
if err != nil {
|
||||
t.Fatalf("Encountered unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if len(cfg.Users) != 1 {
|
||||
t.Fatalf("Parsed %d users, expected 1", len(cfg.Users))
|
||||
}
|
||||
|
||||
user := cfg.Users[0]
|
||||
|
||||
if user.Name != "elroy" {
|
||||
t.Errorf("User name is %q, expected 'elroy'", user.Name)
|
||||
}
|
||||
|
||||
if user.SSHImportGithubUser != "bcwaldon" {
|
||||
t.Errorf("github user is %q, expected 'bcwaldon'", user.SSHImportGithubUser)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCloudConfigUsersSSHImportURL(t *testing.T) {
|
||||
contents := `
|
||||
users:
|
||||
- name: elroy
|
||||
coreos_ssh_import_url: https://token:x-auth-token@github.enterprise.com/api/v3/polvi/keys
|
||||
`
|
||||
cfg, err := NewCloudConfig(contents)
|
||||
if err != nil {
|
||||
t.Fatalf("Encountered unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if len(cfg.Users) != 1 {
|
||||
t.Fatalf("Parsed %d users, expected 1", len(cfg.Users))
|
||||
}
|
||||
|
||||
user := cfg.Users[0]
|
||||
|
||||
if user.Name != "elroy" {
|
||||
t.Errorf("User name is %q, expected 'elroy'", user.Name)
|
||||
}
|
||||
|
||||
if user.SSHImportURL != "https://token:x-auth-token@github.enterprise.com/api/v3/polvi/keys" {
|
||||
t.Errorf("ssh import url is %q, expected 'https://token:x-auth-token@github.enterprise.com/api/v3/polvi/keys'", user.SSHImportURL)
|
||||
}
|
||||
}
|
||||
config/cloudinit/config/file_test.go (new file, 69 lines)
@@ -0,0 +1,69 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestEncodingValid(t *testing.T) {
|
||||
tests := []struct {
|
||||
value string
|
||||
|
||||
isValid bool
|
||||
}{
|
||||
{value: "base64", isValid: true},
|
||||
{value: "b64", isValid: true},
|
||||
{value: "gz", isValid: true},
|
||||
{value: "gzip", isValid: true},
|
||||
{value: "gz+base64", isValid: true},
|
||||
{value: "gzip+base64", isValid: true},
|
||||
{value: "gz+b64", isValid: true},
|
||||
{value: "gzip+b64", isValid: true},
|
||||
{value: "gzzzzbase64", isValid: false},
|
||||
{value: "gzipppbase64", isValid: false},
|
||||
{value: "unknown", isValid: false},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
isValid := (nil == AssertStructValid(File{Encoding: tt.value}))
|
||||
if tt.isValid != isValid {
|
||||
t.Errorf("bad assert (%s): want %t, got %t", tt.value, tt.isValid, isValid)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRawFilePermissionsValid(t *testing.T) {
|
||||
tests := []struct {
|
||||
value string
|
||||
|
||||
isValid bool
|
||||
}{
|
||||
{value: "744", isValid: true},
|
||||
{value: "0744", isValid: true},
|
||||
{value: "1744", isValid: true},
|
||||
{value: "01744", isValid: true},
|
||||
{value: "11744", isValid: false},
|
||||
{value: "rwxr--r--", isValid: false},
|
||||
{value: "800", isValid: false},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
isValid := (nil == AssertStructValid(File{RawFilePermissions: tt.value}))
|
||||
if tt.isValid != isValid {
|
||||
t.Errorf("bad assert (%s): want %t, got %t", tt.value, tt.isValid, isValid)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -20,6 +20,8 @@ type Flannel struct {
|
||||
EtcdCertFile string `yaml:"etcd_certfile" env:"FLANNELD_ETCD_CERTFILE"`
|
||||
EtcdKeyFile string `yaml:"etcd_keyfile" env:"FLANNELD_ETCD_KEYFILE"`
|
||||
EtcdPrefix string `yaml:"etcd_prefix" env:"FLANNELD_ETCD_PREFIX"`
|
||||
EtcdUsername string `yaml:"etcd_username" env:"FLANNELD_ETCD_USERNAME"`
|
||||
EtcdPassword string `yaml:"etcd_password" env:"FLANNELD_ETCD_PASSWORD"`
|
||||
IPMasq string `yaml:"ip_masq" env:"FLANNELD_IP_MASQ"`
|
||||
SubnetFile string `yaml:"subnet_file" env:"FLANNELD_SUBNET_FILE"`
|
||||
Iface string `yaml:"interface" env:"FLANNELD_IFACE"`
|
||||
@@ -25,6 +25,8 @@ type Fleet struct {
|
||||
EtcdKeyPrefix string `yaml:"etcd_key_prefix" env:"FLEET_ETCD_KEY_PREFIX"`
|
||||
EtcdRequestTimeout float64 `yaml:"etcd_request_timeout" env:"FLEET_ETCD_REQUEST_TIMEOUT"`
|
||||
EtcdServers string `yaml:"etcd_servers" env:"FLEET_ETCD_SERVERS"`
|
||||
EtcdUsername string `yaml:"etcd_username" env:"FLEET_ETCD_USERNAME"`
|
||||
EtcdPassword string `yaml:"etcd_password" env:"FLEET_ETCD_PASSWORD"`
|
||||
Metadata string `yaml:"metadata" env:"FLEET_METADATA"`
|
||||
PublicIP string `yaml:"public_ip" env:"FLEET_PUBLIC_IP"`
|
||||
TokenLimit int `yaml:"token_limit" env:"FLEET_TOKEN_LIMIT"`
|
||||
@@ -19,6 +19,8 @@ type Locksmith struct {
|
||||
EtcdCAFile string `yaml:"etcd_cafile" env:"LOCKSMITHD_ETCD_CAFILE"`
|
||||
EtcdCertFile string `yaml:"etcd_certfile" env:"LOCKSMITHD_ETCD_CERTFILE"`
|
||||
EtcdKeyFile string `yaml:"etcd_keyfile" env:"LOCKSMITHD_ETCD_KEYFILE"`
|
||||
EtcdUsername string `yaml:"etcd_username" env:"LOCKSMITHD_ETCD_USERNAME"`
|
||||
EtcdPassword string `yaml:"etcd_password" env:"LOCKSMITHD_ETCD_PASSWORD"`
|
||||
Group string `yaml:"group" env:"LOCKSMITHD_GROUP"`
|
||||
RebootWindowStart string `yaml:"window_start" env:"REBOOT_WINDOW_START" valid:"^((?i:sun|mon|tue|wed|thu|fri|sat|sun) )?0*([0-9]|1[0-9]|2[0-3]):0*([0-9]|[1-5][0-9])$"`
|
||||
RebootWindowLength string `yaml:"window_length" env:"REBOOT_WINDOW_LENGTH" valid:"^[-+]?([0-9]*(\\.[0-9]*)?[a-z]+)+$"`
|
||||
config/cloudinit/config/locksmith_test.go (new file, 76 lines)
@@ -0,0 +1,76 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestRebootWindowStart(t *testing.T) {
|
||||
tests := []struct {
|
||||
value string
|
||||
|
||||
isValid bool
|
||||
}{
|
||||
{value: "Sun 0:0", isValid: true},
|
||||
{value: "Sun 00:00", isValid: true},
|
||||
{value: "sUn 23:59", isValid: true},
|
||||
{value: "mon 0:0", isValid: true},
|
||||
{value: "tue 0:0", isValid: true},
|
||||
{value: "tues 0:0", isValid: false},
|
||||
{value: "wed 0:0", isValid: true},
|
||||
{value: "thu 0:0", isValid: true},
|
||||
{value: "thur 0:0", isValid: false},
|
||||
{value: "fri 0:0", isValid: true},
|
||||
{value: "sat 0:0", isValid: true},
|
||||
{value: "sat00:00", isValid: false},
|
||||
{value: "00:00", isValid: true},
|
||||
{value: "10:10", isValid: true},
|
||||
{value: "20:20", isValid: true},
|
||||
{value: "20:30", isValid: true},
|
||||
{value: "20:40", isValid: true},
|
||||
{value: "20:50", isValid: true},
|
||||
{value: "20:60", isValid: false},
|
||||
{value: "24:00", isValid: false},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
isValid := (nil == AssertStructValid(Locksmith{RebootWindowStart: tt.value}))
|
||||
if tt.isValid != isValid {
|
||||
t.Errorf("bad assert (%s): want %t, got %t", tt.value, tt.isValid, isValid)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRebootWindowLength(t *testing.T) {
|
||||
tests := []struct {
|
||||
value string
|
||||
|
||||
isValid bool
|
||||
}{
|
||||
{value: "1h", isValid: true},
|
||||
{value: "1d", isValid: true},
|
||||
{value: "0d", isValid: true},
|
||||
{value: "0.5h", isValid: true},
|
||||
{value: "0.5.0h", isValid: false},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
isValid := (nil == AssertStructValid(Locksmith{RebootWindowLength: tt.value}))
|
||||
if tt.isValid != isValid {
|
||||
t.Errorf("bad assert (%s): want %t, got %t", tt.value, tt.isValid, isValid)
|
||||
}
|
||||
}
|
||||
}
|
||||
config/cloudinit/config/unit_test.go (new file, 46 lines)
@@ -0,0 +1,46 @@
|
||||
/*
|
||||
Copyright 2014 CoreOS, Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestCommandValid(t *testing.T) {
|
||||
tests := []struct {
|
||||
value string
|
||||
|
||||
isValid bool
|
||||
}{
|
||||
{value: "start", isValid: true},
|
||||
{value: "stop", isValid: true},
|
||||
{value: "restart", isValid: true},
|
||||
{value: "reload", isValid: true},
|
||||
{value: "try-restart", isValid: true},
|
||||
{value: "reload-or-restart", isValid: true},
|
||||
{value: "reload-or-try-restart", isValid: true},
|
||||
{value: "tryrestart", isValid: false},
|
||||
{value: "unknown", isValid: false},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
isValid := (nil == AssertStructValid(Unit{Command: tt.value}))
|
||||
if tt.isValid != isValid {
|
||||
t.Errorf("bad assert (%s): want %t, got %t", tt.value, tt.isValid, isValid)
|
||||
}
|
||||
}
|
||||
}
|
||||
config/cloudinit/config/update_test.go (new file, 43 lines)
@@ -0,0 +1,43 @@
|
||||
/*
|
||||
Copyright 2014 CoreOS, Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestRebootStrategyValid(t *testing.T) {
|
||||
tests := []struct {
|
||||
value string
|
||||
|
||||
isValid bool
|
||||
}{
|
||||
{value: "best-effort", isValid: true},
|
||||
{value: "etcd-lock", isValid: true},
|
||||
{value: "reboot", isValid: true},
|
||||
{value: "off", isValid: true},
|
||||
{value: "besteffort", isValid: false},
|
||||
{value: "unknown", isValid: false},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
isValid := (nil == AssertStructValid(Update{RebootStrategy: tt.value}))
|
||||
if tt.isValid != isValid {
|
||||
t.Errorf("bad assert (%s): want %t, got %t", tt.value, tt.isValid, isValid)
|
||||
}
|
||||
}
|
||||
}
|
||||
config/cloudinit/config/validate/context.go (new file, 52 lines)
@@ -0,0 +1,52 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package validate
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Context represents the current position within a newline-delimited string.
|
||||
// Each line is loaded, one by one, into currentLine (newline omitted) and
|
||||
// lineNumber keeps track of its position within the original string.
|
||||
type Context struct {
|
||||
currentLine string
|
||||
remainingLines string
|
||||
lineNumber int
|
||||
}
|
||||
|
||||
// Increment moves the Context to the next line (if available).
|
||||
func (c *Context) Increment() {
|
||||
if c.currentLine == "" && c.remainingLines == "" {
|
||||
return
|
||||
}
|
||||
|
||||
lines := strings.SplitN(c.remainingLines, "\n", 2)
|
||||
c.currentLine = lines[0]
|
||||
if len(lines) == 2 {
|
||||
c.remainingLines = lines[1]
|
||||
} else {
|
||||
c.remainingLines = ""
|
||||
}
|
||||
c.lineNumber++
|
||||
}
|
||||
|
||||
// NewContext creates a Context from the provided data. It strips out all
|
||||
// carriage returns and moves to the first line (if available).
|
||||
func NewContext(content []byte) Context {
|
||||
c := Context{remainingLines: strings.Replace(string(content), "\r", "", -1)}
|
||||
c.Increment()
|
||||
return c
|
||||
}
|
||||
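A rough in-package sketch of how Context is meant to be driven (walkLines is a hypothetical helper, not part of this change): each Increment call advances currentLine and lineNumber until the input is exhausted, mirroring the loop the validator itself uses in find().

package validate

import "fmt"

// walkLines prints every line of the input with its 1-based line number.
// Illustrative only; it drives Context exactly as the validator does.
func walkLines(content []byte) {
	c := NewContext(content)
	for c.currentLine != "" || c.remainingLines != "" {
		fmt.Printf("%d: %s\n", c.lineNumber, c.currentLine)
		c.Increment()
	}
}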
config/cloudinit/config/validate/context_test.go (new file, 131 lines)
@@ -0,0 +1,131 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package validate
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestNewContext(t *testing.T) {
|
||||
tests := []struct {
|
||||
in string
|
||||
|
||||
out Context
|
||||
}{
|
||||
{
|
||||
out: Context{
|
||||
currentLine: "",
|
||||
remainingLines: "",
|
||||
lineNumber: 0,
|
||||
},
|
||||
},
|
||||
{
|
||||
in: "this\r\nis\r\na\r\ntest",
|
||||
out: Context{
|
||||
currentLine: "this",
|
||||
remainingLines: "is\na\ntest",
|
||||
lineNumber: 1,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
if out := NewContext([]byte(tt.in)); !reflect.DeepEqual(tt.out, out) {
|
||||
t.Errorf("bad context (%q): want %#v, got %#v", tt.in, tt.out, out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIncrement(t *testing.T) {
|
||||
tests := []struct {
|
||||
init Context
|
||||
op func(c *Context)
|
||||
|
||||
res Context
|
||||
}{
|
||||
{
|
||||
init: Context{
|
||||
currentLine: "",
|
||||
remainingLines: "",
|
||||
lineNumber: 0,
|
||||
},
|
||||
res: Context{
|
||||
currentLine: "",
|
||||
remainingLines: "",
|
||||
lineNumber: 0,
|
||||
},
|
||||
op: func(c *Context) {
|
||||
c.Increment()
|
||||
},
|
||||
},
|
||||
{
|
||||
init: Context{
|
||||
currentLine: "test",
|
||||
remainingLines: "",
|
||||
lineNumber: 1,
|
||||
},
|
||||
res: Context{
|
||||
currentLine: "",
|
||||
remainingLines: "",
|
||||
lineNumber: 2,
|
||||
},
|
||||
op: func(c *Context) {
|
||||
c.Increment()
|
||||
c.Increment()
|
||||
c.Increment()
|
||||
},
|
||||
},
|
||||
{
|
||||
init: Context{
|
||||
currentLine: "this",
|
||||
remainingLines: "is\na\ntest",
|
||||
lineNumber: 1,
|
||||
},
|
||||
res: Context{
|
||||
currentLine: "is",
|
||||
remainingLines: "a\ntest",
|
||||
lineNumber: 2,
|
||||
},
|
||||
op: func(c *Context) {
|
||||
c.Increment()
|
||||
},
|
||||
},
|
||||
{
|
||||
init: Context{
|
||||
currentLine: "this",
|
||||
remainingLines: "is\na\ntest",
|
||||
lineNumber: 1,
|
||||
},
|
||||
res: Context{
|
||||
currentLine: "test",
|
||||
remainingLines: "",
|
||||
lineNumber: 4,
|
||||
},
|
||||
op: func(c *Context) {
|
||||
c.Increment()
|
||||
c.Increment()
|
||||
c.Increment()
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
res := tt.init
|
||||
if tt.op(&res); !reflect.DeepEqual(tt.res, res) {
|
||||
t.Errorf("bad context (%d, %#v): want %#v, got %#v", i, tt.init, tt.res, res)
|
||||
}
|
||||
}
|
||||
}
|
||||
config/cloudinit/config/validate/node.go (new file, 157 lines)
@@ -0,0 +1,157 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package validate
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
var (
|
||||
yamlKey = regexp.MustCompile(`^ *-? ?(?P<key>.*?):`)
|
||||
yamlElem = regexp.MustCompile(`^ *-`)
|
||||
)
|
||||
|
||||
type Node struct {
|
||||
name string
|
||||
line int
|
||||
children []Node
|
||||
field reflect.StructField
|
||||
reflect.Value
|
||||
}
|
||||
|
||||
// Child attempts to find the child with the given name in the Node's list of
|
||||
// children. If no such child is found, an invalid Node is returned.
|
||||
func (n Node) Child(name string) Node {
|
||||
for _, c := range n.children {
|
||||
if c.name == name {
|
||||
return c
|
||||
}
|
||||
}
|
||||
return Node{}
|
||||
}
|
||||
|
||||
// HumanType returns the human-consumable string representation of the type of
|
||||
// the Node.
|
||||
func (n Node) HumanType() string {
|
||||
switch k := n.Kind(); k {
|
||||
case reflect.Slice:
|
||||
c := n.Type().Elem()
|
||||
return "[]" + Node{Value: reflect.New(c).Elem()}.HumanType()
|
||||
default:
|
||||
return k.String()
|
||||
}
|
||||
}
|
||||
|
||||
// NewNode returns the Node representation of the given value. The context
|
||||
// will be used in an attempt to determine line numbers for the given value.
|
||||
func NewNode(value interface{}, context Context) Node {
|
||||
var n Node
|
||||
toNode(value, context, &n)
|
||||
return n
|
||||
}
|
||||
|
||||
// toNode converts the given value into a Node and then recursively processes
|
||||
// each of the Nodes components (e.g. fields, array elements, keys).
|
||||
func toNode(v interface{}, c Context, n *Node) {
|
||||
vv := reflect.ValueOf(v)
|
||||
if !vv.IsValid() {
|
||||
return
|
||||
}
|
||||
|
||||
n.Value = vv
|
||||
switch vv.Kind() {
|
||||
case reflect.Struct:
|
||||
// Walk over each field in the structure, skipping unexported fields,
|
||||
// and create a Node for it.
|
||||
for i := 0; i < vv.Type().NumField(); i++ {
|
||||
ft := vv.Type().Field(i)
|
||||
k := ft.Tag.Get("yaml")
|
||||
if k == "-" || k == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
cn := Node{name: k, field: ft}
|
||||
c, ok := findKey(cn.name, c)
|
||||
if ok {
|
||||
cn.line = c.lineNumber
|
||||
}
|
||||
toNode(vv.Field(i).Interface(), c, &cn)
|
||||
n.children = append(n.children, cn)
|
||||
}
|
||||
case reflect.Map:
|
||||
// Walk over each key in the map and create a Node for it.
|
||||
v := v.(map[interface{}]interface{})
|
||||
for k, cv := range v {
|
||||
cn := Node{name: fmt.Sprintf("%s", k)}
|
||||
c, ok := findKey(cn.name, c)
|
||||
if ok {
|
||||
cn.line = c.lineNumber
|
||||
}
|
||||
toNode(cv, c, &cn)
|
||||
n.children = append(n.children, cn)
|
||||
}
|
||||
case reflect.Slice:
|
||||
// Walk over each element in the slice and create a Node for it.
|
||||
// While iterating over the slice, preserve the context after it
|
||||
// is modified. This allows the line numbers to reflect the current
|
||||
// element instead of the first.
|
||||
for i := 0; i < vv.Len(); i++ {
|
||||
cn := Node{
|
||||
name: fmt.Sprintf("%s[%d]", n.name, i),
|
||||
field: n.field,
|
||||
}
|
||||
var ok bool
|
||||
c, ok = findElem(c)
|
||||
if ok {
|
||||
cn.line = c.lineNumber
|
||||
}
|
||||
toNode(vv.Index(i).Interface(), c, &cn)
|
||||
n.children = append(n.children, cn)
|
||||
c.Increment()
|
||||
}
|
||||
case reflect.String, reflect.Int, reflect.Bool, reflect.Float64:
|
||||
default:
|
||||
panic(fmt.Sprintf("toNode(): unhandled kind %s", vv.Kind()))
|
||||
}
|
||||
}
|
||||
|
||||
// findKey attempts to find the requested key within the provided context.
|
||||
// A modified copy of the context is returned with every line up to the key
|
||||
// incremented past. A boolean, true if the key was found, is also returned.
|
||||
func findKey(key string, context Context) (Context, bool) {
|
||||
return find(yamlKey, key, context)
|
||||
}
|
||||
|
||||
// findElem attempts to find an array element within the provided context.
|
||||
// A modified copy of the context is returned with every line up to the array
|
||||
// element incremented past. A boolean, true if the key was found, is also
|
||||
// returned.
|
||||
func findElem(context Context) (Context, bool) {
|
||||
return find(yamlElem, "", context)
|
||||
}
|
||||
|
||||
func find(exp *regexp.Regexp, key string, context Context) (Context, bool) {
|
||||
for len(context.currentLine) > 0 || len(context.remainingLines) > 0 {
|
||||
matches := exp.FindStringSubmatch(context.currentLine)
|
||||
if len(matches) > 0 && (key == "" || matches[1] == key) {
|
||||
return context, true
|
||||
}
|
||||
|
||||
context.Increment()
|
||||
}
|
||||
return context, false
|
||||
}
|
||||
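A rough in-package sketch of how Node, Context, and Child fit together (describeHostname is a hypothetical helper; the literal map stands in for the output of the package's YAML parsing, which is not part of this file):

package validate

import "fmt"

// describeHostname builds a Node tree from an already-parsed document and
// looks up a single key by name. Illustrative only.
func describeHostname() {
	raw := []byte("hostname: node-1")
	parsed := map[interface{}]interface{}{"hostname": "node-1"}

	n := NewNode(parsed, NewContext(raw))
	if c := n.Child("hostname"); c.IsValid() {
		fmt.Printf("hostname is a %s on line %d\n", c.HumanType(), c.line)
	}
}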
config/cloudinit/config/validate/node_test.go (new file, 284 lines)
@@ -0,0 +1,284 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package validate
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestChild(t *testing.T) {
|
||||
tests := []struct {
|
||||
parent Node
|
||||
name string
|
||||
|
||||
child Node
|
||||
}{
|
||||
{},
|
||||
{
|
||||
name: "c1",
|
||||
},
|
||||
{
|
||||
parent: Node{
|
||||
children: []Node{
|
||||
{name: "c1"},
|
||||
{name: "c2"},
|
||||
{name: "c3"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
parent: Node{
|
||||
children: []Node{
|
||||
{name: "c1"},
|
||||
{name: "c2"},
|
||||
{name: "c3"},
|
||||
},
|
||||
},
|
||||
name: "c2",
|
||||
child: Node{name: "c2"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
if child := tt.parent.Child(tt.name); !reflect.DeepEqual(tt.child, child) {
|
||||
t.Errorf("bad child (%q): want %#v, got %#v", tt.name, tt.child, child)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestHumanType(t *testing.T) {
|
||||
tests := []struct {
|
||||
node Node
|
||||
|
||||
humanType string
|
||||
}{
|
||||
{
|
||||
humanType: "invalid",
|
||||
},
|
||||
{
|
||||
node: Node{Value: reflect.ValueOf("hello")},
|
||||
humanType: "string",
|
||||
},
|
||||
{
|
||||
node: Node{
|
||||
Value: reflect.ValueOf([]int{1, 2}),
|
||||
children: []Node{
|
||||
{Value: reflect.ValueOf(1)},
|
||||
{Value: reflect.ValueOf(2)},
|
||||
}},
|
||||
humanType: "[]int",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
if humanType := tt.node.HumanType(); tt.humanType != humanType {
|
||||
t.Errorf("bad type (%q): want %q, got %q", tt.node, tt.humanType, humanType)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestToNode(t *testing.T) {
|
||||
tests := []struct {
|
||||
value interface{}
|
||||
context Context
|
||||
|
||||
node Node
|
||||
}{
|
||||
{},
|
||||
{
|
||||
value: struct{}{},
|
||||
node: Node{Value: reflect.ValueOf(struct{}{})},
|
||||
},
|
||||
{
|
||||
value: struct {
|
||||
A int `yaml:"a"`
|
||||
}{},
|
||||
node: Node{
|
||||
children: []Node{
|
||||
{
|
||||
name: "a",
|
||||
field: reflect.TypeOf(struct {
|
||||
A int `yaml:"a"`
|
||||
}{}).Field(0),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
value: struct {
|
||||
A []int `yaml:"a"`
|
||||
}{},
|
||||
node: Node{
|
||||
children: []Node{
|
||||
{
|
||||
name: "a",
|
||||
field: reflect.TypeOf(struct {
|
||||
A []int `yaml:"a"`
|
||||
}{}).Field(0),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
value: map[interface{}]interface{}{
|
||||
"a": map[interface{}]interface{}{
|
||||
"b": 2,
|
||||
},
|
||||
},
|
||||
context: NewContext([]byte("a:\n b: 2")),
|
||||
node: Node{
|
||||
children: []Node{
|
||||
{
|
||||
line: 1,
|
||||
name: "a",
|
||||
children: []Node{
|
||||
{name: "b", line: 2},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
value: struct {
|
||||
A struct {
|
||||
Jon bool `yaml:"b"`
|
||||
} `yaml:"a"`
|
||||
}{},
|
||||
node: Node{
|
||||
children: []Node{
|
||||
{
|
||||
name: "a",
|
||||
children: []Node{
|
||||
{
|
||||
name: "b",
|
||||
field: reflect.TypeOf(struct {
|
||||
Jon bool `yaml:"b"`
|
||||
}{}).Field(0),
|
||||
Value: reflect.ValueOf(false),
|
||||
},
|
||||
},
|
||||
field: reflect.TypeOf(struct {
|
||||
A struct {
|
||||
Jon bool `yaml:"b"`
|
||||
} `yaml:"a"`
|
||||
}{}).Field(0),
|
||||
Value: reflect.ValueOf(struct {
|
||||
Jon bool `yaml:"b"`
|
||||
}{}),
|
||||
},
|
||||
},
|
||||
Value: reflect.ValueOf(struct {
|
||||
A struct {
|
||||
Jon bool `yaml:"b"`
|
||||
} `yaml:"a"`
|
||||
}{}),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
var node Node
|
||||
toNode(tt.value, tt.context, &node)
|
||||
if !nodesEqual(tt.node, node) {
|
||||
t.Errorf("bad node (%#v): want %#v, got %#v", tt.value, tt.node, node)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFindKey(t *testing.T) {
|
||||
tests := []struct {
|
||||
key string
|
||||
context Context
|
||||
|
||||
found bool
|
||||
}{
|
||||
{},
|
||||
{
|
||||
key: "key1",
|
||||
context: NewContext([]byte("key1: hi")),
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
key: "key2",
|
||||
context: NewContext([]byte("key1: hi")),
|
||||
found: false,
|
||||
},
|
||||
{
|
||||
key: "key3",
|
||||
context: NewContext([]byte("key1:\n key2:\n key3: hi")),
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
key: "key4",
|
||||
context: NewContext([]byte("key1:\n - key4: hi")),
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
key: "key5",
|
||||
context: NewContext([]byte("#key5")),
|
||||
found: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
if _, found := findKey(tt.key, tt.context); tt.found != found {
|
||||
t.Errorf("bad find (%q): want %t, got %t", tt.key, tt.found, found)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFindElem(t *testing.T) {
|
||||
tests := []struct {
|
||||
context Context
|
||||
|
||||
found bool
|
||||
}{
|
||||
{},
|
||||
{
|
||||
context: NewContext([]byte("test: hi")),
|
||||
found: false,
|
||||
},
|
||||
{
|
||||
context: NewContext([]byte("test:\n - a\n -b")),
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
context: NewContext([]byte("test:\n -\n a")),
|
||||
found: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
if _, found := findElem(tt.context); tt.found != found {
|
||||
t.Errorf("bad find (%q): want %t, got %t", tt.context, tt.found, found)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func nodesEqual(a, b Node) bool {
|
||||
if a.name != b.name ||
|
||||
a.line != b.line ||
|
||||
!reflect.DeepEqual(a.field, b.field) ||
|
||||
len(a.children) != len(b.children) {
|
||||
return false
|
||||
}
|
||||
for i := 0; i < len(a.children); i++ {
|
||||
if !nodesEqual(a.children[i], b.children[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
config/cloudinit/config/validate/report.go (new file, 88 lines)
@@ -0,0 +1,88 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package validate
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Report represents the list of entries resulting from validation.
|
||||
type Report struct {
|
||||
entries []Entry
|
||||
}
|
||||
|
||||
// Error adds an error entry to the report.
|
||||
func (r *Report) Error(line int, message string) {
|
||||
r.entries = append(r.entries, Entry{entryError, message, line})
|
||||
}
|
||||
|
||||
// Warning adds a warning entry to the report.
|
||||
func (r *Report) Warning(line int, message string) {
|
||||
r.entries = append(r.entries, Entry{entryWarning, message, line})
|
||||
}
|
||||
|
||||
// Info adds an info entry to the report.
|
||||
func (r *Report) Info(line int, message string) {
|
||||
r.entries = append(r.entries, Entry{entryInfo, message, line})
|
||||
}
|
||||
|
||||
// Entries returns the list of entries in the report.
|
||||
func (r *Report) Entries() []Entry {
|
||||
return r.entries
|
||||
}
|
||||
|
||||
// Entry represents a single generic item in the report.
|
||||
type Entry struct {
|
||||
kind entryKind
|
||||
message string
|
||||
line int
|
||||
}
|
||||
|
||||
// String returns a human-readable representation of the entry.
|
||||
func (e Entry) String() string {
|
||||
return fmt.Sprintf("line %d: %s: %s", e.line, e.kind, e.message)
|
||||
}
|
||||
|
||||
// MarshalJSON satisfies the json.Marshaler interface, returning the entry
|
||||
// encoded as a JSON object.
|
||||
func (e Entry) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(map[string]interface{}{
|
||||
"kind": e.kind.String(),
|
||||
"message": e.message,
|
||||
"line": e.line,
|
||||
})
|
||||
}
|
||||
|
||||
type entryKind int
|
||||
|
||||
const (
|
||||
entryError entryKind = iota
|
||||
entryWarning
|
||||
entryInfo
|
||||
)
|
||||
|
||||
func (k entryKind) String() string {
|
||||
switch k {
|
||||
case entryError:
|
||||
return "error"
|
||||
case entryWarning:
|
||||
return "warning"
|
||||
case entryInfo:
|
||||
return "info"
|
||||
default:
|
||||
panic(fmt.Sprintf("invalid kind %d", k))
|
||||
}
|
||||
}
|
||||
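Taken together, a Report is an ordered log of findings. A rough sketch of how a caller accumulates and prints entries (summarize is a hypothetical helper; the messages are borrowed from the rules added elsewhere in this change):

package validate

import "fmt"

// summarize records a few findings and prints them in the same
// "line N: kind: message" form produced by Entry.String. Illustrative only.
func summarize() {
	var r Report
	r.Warning(3, "discovery URL is not valid")
	r.Error(7, "content cannot be decoded as \"base64\"")
	r.Info(12, "write_files doesn't belong under coreos")

	for _, e := range r.Entries() {
		fmt.Println(e.String())
	}
}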
config/cloudinit/config/validate/report_test.go (new file, 96 lines)
@@ -0,0 +1,96 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package validate
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestEntry(t *testing.T) {
|
||||
tests := []struct {
|
||||
entry Entry
|
||||
|
||||
str string
|
||||
json []byte
|
||||
}{
|
||||
{
|
||||
Entry{entryInfo, "test info", 1},
|
||||
"line 1: info: test info",
|
||||
[]byte(`{"kind":"info","line":1,"message":"test info"}`),
|
||||
},
|
||||
{
|
||||
Entry{entryWarning, "test warning", 1},
|
||||
"line 1: warning: test warning",
|
||||
[]byte(`{"kind":"warning","line":1,"message":"test warning"}`),
|
||||
},
|
||||
{
|
||||
Entry{entryError, "test error", 2},
|
||||
"line 2: error: test error",
|
||||
[]byte(`{"kind":"error","line":2,"message":"test error"}`),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
if str := tt.entry.String(); tt.str != str {
|
||||
t.Errorf("bad string (%q): want %q, got %q", tt.entry, tt.str, str)
|
||||
}
|
||||
json, err := tt.entry.MarshalJSON()
|
||||
if err != nil {
|
||||
t.Errorf("bad error (%q): want %v, got %q", tt.entry, nil, err)
|
||||
}
|
||||
if !bytes.Equal(tt.json, json) {
|
||||
t.Errorf("bad JSON (%q): want %q, got %q", tt.entry, tt.json, json)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestReport(t *testing.T) {
|
||||
type reportFunc struct {
|
||||
fn func(*Report, int, string)
|
||||
line int
|
||||
message string
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
fs []reportFunc
|
||||
|
||||
es []Entry
|
||||
}{
|
||||
{
|
||||
[]reportFunc{
|
||||
{(*Report).Warning, 1, "test warning 1"},
|
||||
{(*Report).Error, 2, "test error 2"},
|
||||
{(*Report).Info, 10, "test info 10"},
|
||||
},
|
||||
[]Entry{
|
||||
{entryWarning, "test warning 1", 1},
|
||||
{entryError, "test error 2", 2},
|
||||
{entryInfo, "test info 10", 10},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
r := Report{}
|
||||
for _, f := range tt.fs {
|
||||
f.fn(&r, f.line, f.message)
|
||||
}
|
||||
if es := r.Entries(); !reflect.DeepEqual(tt.es, es) {
|
||||
t.Errorf("bad entries (%v): want %#v, got %#v", tt.fs, tt.es, es)
|
||||
}
|
||||
}
|
||||
}
|
||||
config/cloudinit/config/validate/rules.go (new file, 180 lines)
@@ -0,0 +1,180 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package validate
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"path"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/rancher/os/config/cloudinit/config"
|
||||
)
|
||||
|
||||
type rule func(config Node, report *Report)
|
||||
|
||||
// Rules contains all of the validation rules.
|
||||
var Rules = []rule{
|
||||
checkDiscoveryURL,
|
||||
checkEncoding,
|
||||
checkStructure,
|
||||
checkValidity,
|
||||
checkWriteFiles,
|
||||
checkWriteFilesUnderCoreos,
|
||||
}
|
||||
|
||||
// checkDiscoveryURL verifies that the string is a valid url.
|
||||
func checkDiscoveryURL(cfg Node, report *Report) {
|
||||
c := cfg.Child("coreos").Child("etcd").Child("discovery")
|
||||
if !c.IsValid() {
|
||||
return
|
||||
}
|
||||
|
||||
if _, err := url.ParseRequestURI(c.String()); err != nil {
|
||||
report.Warning(c.line, "discovery URL is not valid")
|
||||
}
|
||||
}
|
||||
|
||||
// checkEncoding validates that, for each file under 'write_files', the
|
||||
// content can be decoded given the specified encoding.
|
||||
func checkEncoding(cfg Node, report *Report) {
|
||||
for _, f := range cfg.Child("write_files").children {
|
||||
e := f.Child("encoding")
|
||||
if !e.IsValid() {
|
||||
continue
|
||||
}
|
||||
|
||||
c := f.Child("content")
|
||||
if _, err := config.DecodeContent(c.String(), e.String()); err != nil {
|
||||
report.Error(c.line, fmt.Sprintf("content cannot be decoded as %q", e.String()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// checkStructure compares the provided config to the empty config.CloudConfig
|
||||
// structure. Each node is checked to make sure that it exists in the known
|
||||
// structure and that its type is compatible.
|
||||
func checkStructure(cfg Node, report *Report) {
|
||||
g := NewNode(config.CloudConfig{}, NewContext([]byte{}))
|
||||
checkNodeStructure(cfg, g, report)
|
||||
}
|
||||
|
||||
func checkNodeStructure(n, g Node, r *Report) {
|
||||
if !isCompatible(n.Kind(), g.Kind()) {
|
||||
r.Warning(n.line, fmt.Sprintf("incorrect type for %q (want %s)", n.name, g.HumanType()))
|
||||
return
|
||||
}
|
||||
|
||||
switch g.Kind() {
|
||||
case reflect.Struct:
|
||||
for _, cn := range n.children {
|
||||
if cg := g.Child(cn.name); cg.IsValid() {
|
||||
if msg := cg.field.Tag.Get("deprecated"); msg != "" {
|
||||
r.Warning(cn.line, fmt.Sprintf("deprecated key %q (%s)", cn.name, msg))
|
||||
}
|
||||
checkNodeStructure(cn, cg, r)
|
||||
} else {
|
||||
r.Warning(cn.line, fmt.Sprintf("unrecognized key %q", cn.name))
|
||||
}
|
||||
}
|
||||
case reflect.Slice:
|
||||
for _, cn := range n.children {
|
||||
var cg Node
|
||||
c := g.Type().Elem()
|
||||
toNode(reflect.New(c).Elem().Interface(), Context{}, &cg)
|
||||
checkNodeStructure(cn, cg, r)
|
||||
}
|
||||
case reflect.String, reflect.Int, reflect.Float64, reflect.Bool:
|
||||
default:
|
||||
panic(fmt.Sprintf("checkNodeStructure(): unhandled kind %s", g.Kind()))
|
||||
}
|
||||
}
|
||||
|
||||
// isCompatible determines if the type of kind n can be converted to the type
|
||||
// of kind g in the context of YAML. This is not an exhaustive list, but it's
|
||||
// enough for the purposes of cloud-config validation.
|
||||
func isCompatible(n, g reflect.Kind) bool {
|
||||
switch g {
|
||||
case reflect.String:
|
||||
return n == reflect.String || n == reflect.Int || n == reflect.Float64 || n == reflect.Bool
|
||||
case reflect.Struct:
|
||||
return n == reflect.Struct || n == reflect.Map
|
||||
case reflect.Float64:
|
||||
return n == reflect.Float64 || n == reflect.Int
|
||||
case reflect.Bool, reflect.Slice, reflect.Int:
|
||||
return n == g
|
||||
default:
|
||||
panic(fmt.Sprintf("isCompatible(): unhandled kind %s", g))
|
||||
}
|
||||
}
|
||||
|
||||
// checkValidity checks the value of every node in the provided config by
|
||||
// running config.AssertValid() on it.
|
||||
func checkValidity(cfg Node, report *Report) {
|
||||
g := NewNode(config.CloudConfig{}, NewContext([]byte{}))
|
||||
checkNodeValidity(cfg, g, report)
|
||||
}
|
||||
|
||||
func checkNodeValidity(n, g Node, r *Report) {
|
||||
if err := config.AssertValid(n.Value, g.field.Tag.Get("valid")); err != nil {
|
||||
r.Error(n.line, fmt.Sprintf("invalid value %v", n.Value.Interface()))
|
||||
}
|
||||
switch g.Kind() {
|
||||
case reflect.Struct:
|
||||
for _, cn := range n.children {
|
||||
if cg := g.Child(cn.name); cg.IsValid() {
|
||||
checkNodeValidity(cn, cg, r)
|
||||
}
|
||||
}
|
||||
case reflect.Slice:
|
||||
for _, cn := range n.children {
|
||||
var cg Node
|
||||
c := g.Type().Elem()
|
||||
toNode(reflect.New(c).Elem().Interface(), Context{}, &cg)
|
||||
checkNodeValidity(cn, cg, r)
|
||||
}
|
||||
case reflect.String, reflect.Int, reflect.Float64, reflect.Bool:
|
||||
default:
|
||||
panic(fmt.Sprintf("checkNodeValidity(): unhandled kind %s", g.Kind()))
|
||||
}
|
||||
}
|
||||
|
||||
// checkWriteFiles checks to make sure that the target file can actually be
|
||||
// written. Note that this check is approximate (it only checks to see if the file
|
||||
// is under /usr).
|
||||
func checkWriteFiles(cfg Node, report *Report) {
|
||||
for _, f := range cfg.Child("write_files").children {
|
||||
c := f.Child("path")
|
||||
if !c.IsValid() {
|
||||
continue
|
||||
}
|
||||
|
||||
d := path.Dir(c.String())
|
||||
switch {
|
||||
case strings.HasPrefix(d, "/usr"):
|
||||
report.Error(c.line, "file cannot be written to a read-only filesystem")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// checkWriteFilesUnderCoreos checks to see if the 'write_files' node is a
|
||||
// child of 'coreos' (it shouldn't be).
|
||||
func checkWriteFilesUnderCoreos(cfg Node, report *Report) {
|
||||
c := cfg.Child("coreos").Child("write_files")
|
||||
if c.IsValid() {
|
||||
report.Info(c.line, "write_files doesn't belong under coreos")
|
||||
}
|
||||
}
|
||||
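A rough in-package sketch of how the rule set is driven end to end (lintCloudConfig is a hypothetical helper; parseCloudConfig is the same helper the tests below rely on, defined elsewhere in this package and not shown in this diff):

package validate

// lintCloudConfig parses a document into a Node, applies every rule in
// Rules, and returns the accumulated findings. Illustrative only.
func lintCloudConfig(doc []byte) ([]Entry, error) {
	r := Report{}
	n, err := parseCloudConfig(doc, &r)
	if err != nil {
		return nil, err
	}
	for _, check := range Rules {
		check(n, &r)
	}
	return r.Entries(), nil
}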
config/cloudinit/config/validate/rules_test.go (new file, 408 lines)
@@ -0,0 +1,408 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package validate
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestCheckDiscoveryURL(t *testing.T) {
|
||||
tests := []struct {
|
||||
config string
|
||||
|
||||
entries []Entry
|
||||
}{
|
||||
{},
|
||||
{
|
||||
config: "coreos:\n etcd:\n discovery: https://discovery.etcd.io/00000000000000000000000000000000",
|
||||
},
|
||||
{
|
||||
config: "coreos:\n etcd:\n discovery: http://custom.domain/mytoken",
|
||||
},
|
||||
{
|
||||
config: "coreos:\n etcd:\n discovery: disco",
|
||||
entries: []Entry{{entryWarning, "discovery URL is not valid", 3}},
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
r := Report{}
|
||||
n, err := parseCloudConfig([]byte(tt.config), &r)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
checkDiscoveryURL(n, &r)
|
||||
|
||||
if e := r.Entries(); !reflect.DeepEqual(tt.entries, e) {
|
||||
t.Errorf("bad report (%d, %q): want %#v, got %#v", i, tt.config, tt.entries, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckEncoding(t *testing.T) {
|
||||
tests := []struct {
|
||||
config string
|
||||
|
||||
entries []Entry
|
||||
}{
|
||||
{},
|
||||
{
|
||||
config: "write_files:\n - encoding: base64\n content: aGVsbG8K",
|
||||
},
|
||||
{
|
||||
config: "write_files:\n - content: !!binary aGVsbG8K",
|
||||
},
|
||||
{
|
||||
config: "write_files:\n - encoding: base64\n content: !!binary aGVsbG8K",
|
||||
entries: []Entry{{entryError, `content cannot be decoded as "base64"`, 3}},
|
||||
},
|
||||
{
|
||||
config: "write_files:\n - encoding: base64\n content: !!binary YUdWc2JHOEsK",
|
||||
},
|
||||
{
|
||||
config: "write_files:\n - encoding: gzip\n content: !!binary H4sIAOC3tVQAA8tIzcnJ5wIAIDA6NgYAAAA=",
|
||||
},
|
||||
{
|
||||
config: "write_files:\n - encoding: gzip+base64\n content: H4sIAOC3tVQAA8tIzcnJ5wIAIDA6NgYAAAA=",
|
||||
},
|
||||
{
|
||||
config: "write_files:\n - encoding: custom\n content: hello",
|
||||
entries: []Entry{{entryError, `content cannot be decoded as "custom"`, 3}},
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
r := Report{}
|
||||
n, err := parseCloudConfig([]byte(tt.config), &r)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
checkEncoding(n, &r)
|
||||
|
||||
if e := r.Entries(); !reflect.DeepEqual(tt.entries, e) {
|
||||
t.Errorf("bad report (%d, %q): want %#v, got %#v", i, tt.config, tt.entries, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckStructure(t *testing.T) {
|
||||
tests := []struct {
|
||||
config string
|
||||
|
||||
entries []Entry
|
||||
}{
|
||||
{},
|
||||
|
||||
// Test for unrecognized keys
|
||||
{
|
||||
config: "test:",
|
||||
entries: []Entry{{entryWarning, "unrecognized key \"test\"", 1}},
|
||||
},
|
||||
{
|
||||
config: "coreos:\n etcd:\n bad:",
|
||||
entries: []Entry{{entryWarning, "unrecognized key \"bad\"", 3}},
|
||||
},
|
||||
{
|
||||
config: "coreos:\n etcd:\n discovery: good",
|
||||
},
|
||||
|
||||
// Test for deprecated keys
|
||||
{
|
||||
config: "coreos:\n etcd:\n addr: hi",
|
||||
},
|
||||
{
|
||||
config: "coreos:\n etcd:\n proxy: hi",
|
||||
entries: []Entry{{entryWarning, "deprecated key \"proxy\" (etcd2 options no longer work for etcd)", 3}},
|
||||
},
|
||||
|
||||
// Test for error on list of nodes
|
||||
{
|
||||
config: "coreos:\n units:\n - hello\n - goodbye",
|
||||
entries: []Entry{
|
||||
{entryWarning, "incorrect type for \"units[0]\" (want struct)", 3},
|
||||
{entryWarning, "incorrect type for \"units[1]\" (want struct)", 4},
|
||||
},
|
||||
},
|
||||
|
||||
// Test for incorrect types
|
||||
// Want boolean
|
||||
{
|
||||
config: "coreos:\n units:\n - enable: true",
|
||||
},
|
||||
{
|
||||
config: "coreos:\n units:\n - enable: 4",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"enable\" (want bool)", 3}},
|
||||
},
|
||||
{
|
||||
config: "coreos:\n units:\n - enable: bad",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"enable\" (want bool)", 3}},
|
||||
},
|
||||
{
|
||||
config: "coreos:\n units:\n - enable:\n bad:",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"enable\" (want bool)", 3}},
|
||||
},
|
||||
{
|
||||
config: "coreos:\n units:\n - enable:\n - bad",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"enable\" (want bool)", 3}},
|
||||
},
|
||||
// Want string
|
||||
{
|
||||
config: "hostname: true",
|
||||
},
|
||||
{
|
||||
config: "hostname: 4",
|
||||
},
|
||||
{
|
||||
config: "hostname: host",
|
||||
},
|
||||
{
|
||||
config: "hostname:\n name:",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"hostname\" (want string)", 1}},
|
||||
},
|
||||
{
|
||||
config: "hostname:\n - name",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"hostname\" (want string)", 1}},
|
||||
},
|
||||
// Want struct
|
||||
{
|
||||
config: "coreos: true",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"coreos\" (want struct)", 1}},
|
||||
},
|
||||
{
|
||||
config: "coreos: 4",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"coreos\" (want struct)", 1}},
|
||||
},
|
||||
{
|
||||
config: "coreos: hello",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"coreos\" (want struct)", 1}},
|
||||
},
|
||||
{
|
||||
config: "coreos:\n etcd:\n discovery: fire in the disco",
|
||||
},
|
||||
{
|
||||
config: "coreos:\n - hello",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"coreos\" (want struct)", 1}},
|
||||
},
|
||||
// Want []string
|
||||
{
|
||||
config: "ssh_authorized_keys: true",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"ssh_authorized_keys\" (want []string)", 1}},
|
||||
},
|
||||
{
|
||||
config: "ssh_authorized_keys: 4",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"ssh_authorized_keys\" (want []string)", 1}},
|
||||
},
|
||||
{
|
||||
config: "ssh_authorized_keys: key",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"ssh_authorized_keys\" (want []string)", 1}},
|
||||
},
|
||||
{
|
||||
config: "ssh_authorized_keys:\n key: value",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"ssh_authorized_keys\" (want []string)", 1}},
|
||||
},
|
||||
{
|
||||
config: "ssh_authorized_keys:\n - key",
|
||||
},
|
||||
{
|
||||
config: "ssh_authorized_keys:\n - key: value",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"ssh_authorized_keys[0]\" (want string)", 2}},
|
||||
},
|
||||
// Want []struct
|
||||
{
|
||||
config: "users:\n true",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"users\" (want []struct)", 1}},
|
||||
},
|
||||
{
|
||||
config: "users:\n 4",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"users\" (want []struct)", 1}},
|
||||
},
|
||||
{
|
||||
config: "users:\n bad",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"users\" (want []struct)", 1}},
|
||||
},
|
||||
{
|
||||
config: "users:\n bad:",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"users\" (want []struct)", 1}},
|
||||
},
|
||||
{
|
||||
config: "users:\n - name: good",
|
||||
},
|
||||
// Want struct within array
|
||||
{
|
||||
config: "users:\n - true",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"users[0]\" (want struct)", 2}},
|
||||
},
|
||||
{
|
||||
config: "users:\n - name: hi\n - true",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"users[1]\" (want struct)", 3}},
|
||||
},
|
||||
{
|
||||
config: "users:\n - 4",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"users[0]\" (want struct)", 2}},
|
||||
},
|
||||
{
|
||||
config: "users:\n - bad",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"users[0]\" (want struct)", 2}},
|
||||
},
|
||||
{
|
||||
config: "users:\n - - bad",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"users[0]\" (want struct)", 2}},
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
r := Report{}
|
||||
n, err := parseCloudConfig([]byte(tt.config), &r)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
checkStructure(n, &r)
|
||||
|
||||
if e := r.Entries(); !reflect.DeepEqual(tt.entries, e) {
|
||||
t.Errorf("bad report (%d, %q): want %#v, got %#v", i, tt.config, tt.entries, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckValidity(t *testing.T) {
|
||||
tests := []struct {
|
||||
config string
|
||||
|
||||
entries []Entry
|
||||
}{
|
||||
// string
|
||||
{
|
||||
config: "hostname: test",
|
||||
},
|
||||
|
||||
// int
|
||||
{
|
||||
config: "coreos:\n fleet:\n verbosity: 2",
|
||||
},
|
||||
|
||||
// bool
|
||||
{
|
||||
config: "coreos:\n units:\n - enable: true",
|
||||
},
|
||||
|
||||
// slice
|
||||
{
|
||||
config: "coreos:\n units:\n - command: start\n - name: stop",
|
||||
},
|
||||
{
|
||||
config: "coreos:\n units:\n - command: lol",
|
||||
entries: []Entry{{entryError, "invalid value lol", 3}},
|
||||
},
|
||||
|
||||
// struct
|
||||
{
|
||||
config: "coreos:\n update:\n reboot_strategy: off",
|
||||
},
|
||||
{
|
||||
config: "coreos:\n update:\n reboot_strategy: always",
|
||||
entries: []Entry{{entryError, "invalid value always", 3}},
|
||||
},
|
||||
|
||||
// unknown
|
||||
{
|
||||
config: "unknown: hi",
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
r := Report{}
|
||||
n, err := parseCloudConfig([]byte(tt.config), &r)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
checkValidity(n, &r)
|
||||
|
||||
if e := r.Entries(); !reflect.DeepEqual(tt.entries, e) {
|
||||
t.Errorf("bad report (%d, %q): want %#v, got %#v", i, tt.config, tt.entries, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckWriteFiles(t *testing.T) {
|
||||
tests := []struct {
|
||||
config string
|
||||
|
||||
entries []Entry
|
||||
}{
|
||||
{},
|
||||
{
|
||||
config: "write_files:\n - path: /valid",
|
||||
},
|
||||
{
|
||||
config: "write_files:\n - path: /tmp/usr/valid",
|
||||
},
|
||||
{
|
||||
config: "write_files:\n - path: /usr/invalid",
|
||||
entries: []Entry{{entryError, "file cannot be written to a read-only filesystem", 2}},
|
||||
},
|
||||
{
|
||||
config: "write-files:\n - path: /tmp/../usr/invalid",
|
||||
entries: []Entry{{entryError, "file cannot be written to a read-only filesystem", 2}},
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
r := Report{}
|
||||
n, err := parseCloudConfig([]byte(tt.config), &r)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
checkWriteFiles(n, &r)
|
||||
|
||||
if e := r.Entries(); !reflect.DeepEqual(tt.entries, e) {
|
||||
t.Errorf("bad report (%d, %q): want %#v, got %#v", i, tt.config, tt.entries, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckWriteFilesUnderCoreos(t *testing.T) {
|
||||
tests := []struct {
|
||||
config string
|
||||
|
||||
entries []Entry
|
||||
}{
|
||||
{},
|
||||
{
|
||||
config: "write_files:\n - path: /hi",
|
||||
},
|
||||
{
|
||||
config: "coreos:\n write_files:\n - path: /hi",
|
||||
entries: []Entry{{entryInfo, "write_files doesn't belong under coreos", 2}},
|
||||
},
|
||||
{
|
||||
config: "coreos:\n write-files:\n - path: /hyphen",
|
||||
entries: []Entry{{entryInfo, "write_files doesn't belong under coreos", 2}},
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
r := Report{}
|
||||
n, err := parseCloudConfig([]byte(tt.config), &r)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
checkWriteFilesUnderCoreos(n, &r)
|
||||
|
||||
if e := r.Entries(); !reflect.DeepEqual(tt.entries, e) {
|
||||
t.Errorf("bad report (%d, %q): want %#v, got %#v", i, tt.config, tt.entries, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
164  config/cloudinit/config/validate/validate.go  Normal file
@@ -0,0 +1,164 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package validate

import (
	"errors"
	"fmt"
	"regexp"
	"strconv"
	"strings"

	"github.com/rancher/os/config/cloudinit/config"

	"github.com/coreos/yaml"
)

var (
	yamlLineError = regexp.MustCompile(`^YAML error: line (?P<line>[[:digit:]]+): (?P<msg>.*)$`)
	yamlError     = regexp.MustCompile(`^YAML error: (?P<msg>.*)$`)
)

// Validate runs a series of validation tests against the given userdata and
// returns a report detailing all of the issues. Presently, only cloud-configs
// can be validated.
func Validate(userdataBytes []byte) (Report, error) {
	switch {
	case len(userdataBytes) == 0:
		return Report{}, nil
	case config.IsScript(string(userdataBytes)):
		return Report{}, nil
	case config.IsIgnitionConfig(string(userdataBytes)):
		return Report{}, nil
	case config.IsCloudConfig(string(userdataBytes)):
		return validateCloudConfig(userdataBytes, Rules)
	default:
		return Report{entries: []Entry{
			{kind: entryError, message: `must be "#cloud-config" or begin with "#!"`, line: 1},
		}}, nil
	}
}

// validateCloudConfig runs all of the validation rules in Rules and returns
// the resulting report and any errors encountered.
func validateCloudConfig(config []byte, rules []rule) (report Report, err error) {
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("%v", r)
		}
	}()

	c, err := parseCloudConfig(config, &report)
	if err != nil {
		return report, err
	}

	for _, r := range rules {
		r(c, &report)
	}
	return report, nil
}

// parseCloudConfig parses the provided config into a node structure and logs
// any parsing issues into the provided report. Unrecoverable errors are
// returned as an error.
func parseCloudConfig(cfg []byte, report *Report) (Node, error) {
	yaml.UnmarshalMappingKeyTransform = func(nameIn string) (nameOut string) {
		return nameIn
	}
	// unmarshal the config into an implicitly-typed form. The yaml library
	// will implicitly convert types into their normalized form
	// (e.g. 0744 -> 484, off -> false).
	var weak map[interface{}]interface{}
	if err := yaml.Unmarshal(cfg, &weak); err != nil {
		matches := yamlLineError.FindStringSubmatch(err.Error())
		if len(matches) == 3 {
			line, err := strconv.Atoi(matches[1])
			if err != nil {
				return Node{}, err
			}
			msg := matches[2]
			report.Error(line, msg)
			return Node{}, nil
		}

		matches = yamlError.FindStringSubmatch(err.Error())
		if len(matches) == 2 {
			report.Error(1, matches[1])
			return Node{}, nil
		}

		return Node{}, errors.New("couldn't parse yaml error")
	}
	w := NewNode(weak, NewContext(cfg))
	w = normalizeNodeNames(w, report)

	// unmarshal the config into the explicitly-typed form.
	yaml.UnmarshalMappingKeyTransform = func(nameIn string) (nameOut string) {
		return strings.Replace(nameIn, "-", "_", -1)
	}
	var strong config.CloudConfig
	if err := yaml.Unmarshal([]byte(cfg), &strong); err != nil {
		return Node{}, err
	}
	s := NewNode(strong, NewContext(cfg))

	// coerce the weak and strong nodes together: strong nodes replace weak
	// nodes if they are compatible types (this happens when the yaml library
	// converts the input).
	// (e.g. weak 484 is replaced by strong 0744, weak 4 is not replaced by
	// strong false)
	return coerceNodes(w, s), nil
}

// coerceNodes recursively evaluates two nodes, returning a new node containing
// either the weak or strong node's value and its recursively processed
// children. The strong node's value is used if the two nodes are leaves, are
// both valid, and are compatible types (defined by isCompatible()). The weak
// node is returned in all other cases. coerceNodes is used to counteract the
// effects of yaml's automatic type conversion. The weak node is the one
// resulting from unmarshalling into an empty interface{} (the type is
// inferred). The strong node is the one resulting from unmarshalling into a
// struct. If the two nodes are of compatible types, the yaml library correctly
// parsed the value into the strongly typed unmarshalling. In this case, we
// prefer the strong node because it's actually the type we are expecting.
func coerceNodes(w, s Node) Node {
	n := w
	n.children = nil
	if len(w.children) == 0 && len(s.children) == 0 &&
		w.IsValid() && s.IsValid() &&
		isCompatible(w.Kind(), s.Kind()) {
		n.Value = s.Value
	}

	for _, cw := range w.children {
		n.children = append(n.children, coerceNodes(cw, s.Child(cw.name)))
	}
	return n
}

// normalizeNodeNames replaces all occurrences of '-' with '_' within key names
// and makes a note of each replacement in the report.
func normalizeNodeNames(node Node, report *Report) Node {
	if strings.Contains(node.name, "-") {
		// TODO(crawford): Enable this message once the new validator hits stable.
		//report.Info(node.line, fmt.Sprintf("%q uses '-' instead of '_'", node.name))
		node.name = strings.Replace(node.name, "-", "_", -1)
	}
	for i := range node.children {
		node.children[i] = normalizeNodeNames(node.children[i], report)
	}
	return node
}
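To put the entry points above in context, here is a hedged usage sketch. It assumes the package is imported from the new path introduced in this diff, and it prints entries with %+v rather than assuming any particular String method on Entry; the sample userdata is purely illustrative.

```go
package main

import (
	"fmt"

	"github.com/rancher/os/config/cloudinit/config/validate"
)

func main() {
	userdata := []byte("#cloud-config\nhostname: box\nwrite_files:\n  - path: /usr/invalid\n")

	// Validate only returns an error for unrecoverable failures; ordinary
	// problems with the cloud-config are collected in the report instead.
	report, err := validate.Validate(userdata)
	if err != nil {
		panic(err)
	}
	for _, entry := range report.Entries() {
		fmt.Printf("%+v\n", entry) // each entry carries a kind, message and line number
	}
}
```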
177  config/cloudinit/config/validate/validate_test.go  Normal file
@@ -0,0 +1,177 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package validate
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestParseCloudConfig(t *testing.T) {
|
||||
tests := []struct {
|
||||
config string
|
||||
|
||||
entries []Entry
|
||||
}{
|
||||
{},
|
||||
{
|
||||
config: " ",
|
||||
entries: []Entry{{entryError, "found character that cannot start any token", 1}},
|
||||
},
|
||||
{
|
||||
config: "a:\na",
|
||||
entries: []Entry{{entryError, "could not find expected ':'", 2}},
|
||||
},
|
||||
{
|
||||
config: "#hello\na:\na",
|
||||
entries: []Entry{{entryError, "could not find expected ':'", 3}},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
r := Report{}
|
||||
parseCloudConfig([]byte(tt.config), &r)
|
||||
|
||||
if e := r.Entries(); !reflect.DeepEqual(tt.entries, e) {
|
||||
t.Errorf("bad report (%s): want %#v, got %#v", tt.config, tt.entries, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateCloudConfig(t *testing.T) {
|
||||
tests := []struct {
|
||||
config string
|
||||
rules []rule
|
||||
|
||||
report Report
|
||||
err error
|
||||
}{
|
||||
{
|
||||
rules: []rule{func(_ Node, _ *Report) { panic("something happened") }},
|
||||
err: errors.New("something happened"),
|
||||
},
|
||||
{
|
||||
config: "write_files:\n - permissions: 0744",
|
||||
rules: Rules,
|
||||
},
|
||||
{
|
||||
config: "write_files:\n - permissions: '0744'",
|
||||
rules: Rules,
|
||||
},
|
||||
{
|
||||
config: "write_files:\n - permissions: 744",
|
||||
rules: Rules,
|
||||
},
|
||||
{
|
||||
config: "write_files:\n - permissions: '744'",
|
||||
rules: Rules,
|
||||
},
|
||||
{
|
||||
config: "coreos:\n update:\n reboot-strategy: off",
|
||||
rules: Rules,
|
||||
},
|
||||
{
|
||||
config: "coreos:\n update:\n reboot-strategy: false",
|
||||
rules: Rules,
|
||||
report: Report{entries: []Entry{{entryError, "invalid value false", 3}}},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
r, err := validateCloudConfig([]byte(tt.config), tt.rules)
|
||||
if !reflect.DeepEqual(tt.err, err) {
|
||||
t.Errorf("bad error (%s): want %v, got %v", tt.config, tt.err, err)
|
||||
}
|
||||
if !reflect.DeepEqual(tt.report, r) {
|
||||
t.Errorf("bad report (%s): want %+v, got %+v", tt.config, tt.report, r)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidate(t *testing.T) {
|
||||
tests := []struct {
|
||||
config string
|
||||
|
||||
report Report
|
||||
}{
|
||||
{},
|
||||
{
|
||||
config: "#!/bin/bash\necho hey",
|
||||
},
|
||||
{
|
||||
config: "{}",
|
||||
report: Report{entries: []Entry{{entryError, `must be "#cloud-config" or begin with "#!"`, 1}}},
|
||||
},
|
||||
{
|
||||
config: `{"ignitionVersion":0}`,
|
||||
},
|
||||
{
|
||||
config: `{"ignitionVersion":1}`,
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
r, err := Validate([]byte(tt.config))
|
||||
if err != nil {
|
||||
t.Errorf("bad error (case #%d): want %v, got %v", i, nil, err)
|
||||
}
|
||||
if !reflect.DeepEqual(tt.report, r) {
|
||||
t.Errorf("bad report (case #%d): want %+v, got %+v", i, tt.report, r)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkValidate(b *testing.B) {
|
||||
config := `#cloud-config
|
||||
hostname: test
|
||||
|
||||
coreos:
|
||||
etcd:
|
||||
name: node001
|
||||
discovery: https://discovery.etcd.io/disco
|
||||
addr: $public_ipv4:4001
|
||||
peer-addr: $private_ipv4:7001
|
||||
fleet:
|
||||
verbosity: 2
|
||||
metadata: "hi"
|
||||
update:
|
||||
reboot-strategy: off
|
||||
units:
|
||||
- name: hi.service
|
||||
command: start
|
||||
enable: true
|
||||
- name: bye.service
|
||||
command: stop
|
||||
|
||||
ssh_authorized_keys:
|
||||
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC0g+ZTxC7weoIJLUafOgrm+h...
|
||||
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC0g+ZTxC7weoIJLUafOgrm+h...
|
||||
|
||||
users:
|
||||
- name: me
|
||||
|
||||
write_files:
|
||||
- path: /etc/yes
|
||||
content: "Hi"
|
||||
|
||||
manage_etc_hosts: localhost`
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
if _, err := Validate([]byte(config)); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
169  config/cloudinit/datasource/configdrive/configdrive.go  Executable file
@@ -0,0 +1,169 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package configdrive
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"syscall"
|
||||
|
||||
"github.com/rancher/os/log"
|
||||
|
||||
"github.com/docker/docker/pkg/mount"
|
||||
"github.com/rancher/os/config/cloudinit/datasource"
|
||||
"github.com/rancher/os/util"
|
||||
)
|
||||
|
||||
const (
|
||||
configDevName = "config-2"
|
||||
configDev = "LABEL=" + configDevName
|
||||
configDevMountPoint = "/media/config-2"
|
||||
openstackAPIVersion = "latest"
|
||||
)
|
||||
|
||||
type ConfigDrive struct {
|
||||
root string
|
||||
readFile func(filename string) ([]byte, error)
|
||||
lastError error
|
||||
availabilityChanges bool
|
||||
}
|
||||
|
||||
func NewDatasource(root string) *ConfigDrive {
|
||||
return &ConfigDrive{root, ioutil.ReadFile, nil, true}
|
||||
}
|
||||
|
||||
func (cd *ConfigDrive) IsAvailable() bool {
|
||||
if cd.root == configDevMountPoint {
|
||||
cd.lastError = MountConfigDrive()
|
||||
if cd.lastError != nil {
|
||||
log.Error(cd.lastError)
|
||||
// Don't keep retrying if we can't mount
|
||||
cd.availabilityChanges = false
|
||||
return false
|
||||
}
|
||||
defer cd.Finish()
|
||||
}
|
||||
|
||||
_, cd.lastError = os.Stat(cd.root)
|
||||
return !os.IsNotExist(cd.lastError)
|
||||
// TODO: consider changing IsNotExists to not-available _and_ does not change
|
||||
}
|
||||
|
||||
func (cd *ConfigDrive) Finish() error {
|
||||
return UnmountConfigDrive()
|
||||
}
|
||||
|
||||
func (cd *ConfigDrive) String() string {
|
||||
if cd.lastError != nil {
|
||||
return fmt.Sprintf("%s: %s (lastError: %s)", cd.Type(), cd.root, cd.lastError)
|
||||
}
|
||||
return fmt.Sprintf("%s: %s", cd.Type(), cd.root)
|
||||
}
|
||||
|
||||
func (cd *ConfigDrive) AvailabilityChanges() bool {
|
||||
return cd.availabilityChanges
|
||||
}
|
||||
|
||||
func (cd *ConfigDrive) ConfigRoot() string {
|
||||
return cd.openstackRoot()
|
||||
}
|
||||
|
||||
func (cd *ConfigDrive) FetchMetadata() (metadata datasource.Metadata, err error) {
|
||||
var data []byte
|
||||
var m struct {
|
||||
SSHAuthorizedKeyMap map[string]string `json:"public_keys"`
|
||||
Hostname string `json:"hostname"`
|
||||
NetworkConfig struct {
|
||||
ContentPath string `json:"content_path"`
|
||||
} `json:"network_config"`
|
||||
}
|
||||
|
||||
if data, err = cd.tryReadFile(path.Join(cd.openstackVersionRoot(), "meta_data.json")); err != nil || len(data) == 0 {
|
||||
return
|
||||
}
|
||||
if err = json.Unmarshal([]byte(data), &m); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
metadata.SSHPublicKeys = m.SSHAuthorizedKeyMap
|
||||
metadata.Hostname = m.Hostname
|
||||
// TODO: I don't think we've used this for anything
|
||||
/* if m.NetworkConfig.ContentPath != "" {
|
||||
metadata.NetworkConfig, err = cd.tryReadFile(path.Join(cd.openstackRoot(), m.NetworkConfig.ContentPath))
|
||||
}
|
||||
*/
|
||||
return
|
||||
}
|
||||
|
||||
func (cd *ConfigDrive) FetchUserdata() ([]byte, error) {
|
||||
return cd.tryReadFile(path.Join(cd.openstackVersionRoot(), "user_data"))
|
||||
}
|
||||
|
||||
func (cd *ConfigDrive) Type() string {
|
||||
return "cloud-drive"
|
||||
}
|
||||
|
||||
func (cd *ConfigDrive) openstackRoot() string {
|
||||
return path.Join(cd.root, "openstack")
|
||||
}
|
||||
|
||||
func (cd *ConfigDrive) openstackVersionRoot() string {
|
||||
return path.Join(cd.openstackRoot(), openstackAPIVersion)
|
||||
}
|
||||
|
||||
func (cd *ConfigDrive) tryReadFile(filename string) ([]byte, error) {
|
||||
if cd.root == configDevMountPoint {
|
||||
cd.lastError = MountConfigDrive()
|
||||
if cd.lastError != nil {
|
||||
log.Error(cd.lastError)
|
||||
return nil, cd.lastError
|
||||
}
|
||||
defer cd.Finish()
|
||||
}
|
||||
log.Debugf("Attempting to read from %q\n", filename)
|
||||
data, err := cd.readFile(filename)
|
||||
if os.IsNotExist(err) {
|
||||
err = nil
|
||||
}
|
||||
if err != nil {
|
||||
log.Errorf("ERROR read cloud-config file(%s) - err: %q", filename, err)
|
||||
}
|
||||
return data, err
|
||||
}
|
||||
|
||||
func MountConfigDrive() error {
|
||||
if err := os.MkdirAll(configDevMountPoint, 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
configDev := util.ResolveDevice(configDev)
|
||||
|
||||
if configDev == "" {
|
||||
return mount.Mount(configDevName, configDevMountPoint, "9p", "trans=virtio,version=9p2000.L")
|
||||
}
|
||||
|
||||
fsType, err := util.GetFsType(configDev)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return mount.Mount(configDev, configDevMountPoint, fsType, "ro")
|
||||
}
|
||||
|
||||
func UnmountConfigDrive() error {
|
||||
return syscall.Unmount(configDevMountPoint, 0)
|
||||
}
|
||||
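A brief, hedged sketch of how this datasource is exercised; the flow mirrors the code above and nothing here is RancherOS's actual boot wiring:

```go
package main

import (
	"fmt"

	"github.com/rancher/os/config/cloudinit/datasource/configdrive"
)

func main() {
	// Using the well-known mount point makes IsAvailable (and each read) mount
	// LABEL=config-2, or fall back to the 9p share, and unmount again afterwards.
	cd := configdrive.NewDatasource("/media/config-2")
	if !cd.IsAvailable() {
		fmt.Println("no config drive:", cd.String())
		return
	}

	userdata, err := cd.FetchUserdata() // reads <root>/openstack/latest/user_data
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes of userdata from %s\n", len(userdata), cd.Type())
}
```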
144  config/cloudinit/datasource/configdrive/configdrive_test.go  Executable file
@@ -0,0 +1,144 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package configdrive
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/rancher/os/config/cloudinit/datasource"
|
||||
"github.com/rancher/os/config/cloudinit/datasource/test"
|
||||
)
|
||||
|
||||
func TestFetchMetadata(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
root string
|
||||
files test.MockFilesystem
|
||||
|
||||
metadata datasource.Metadata
|
||||
}{
|
||||
{
|
||||
root: "/",
|
||||
files: test.NewMockFilesystem(test.File{Path: "/openstack/latest/meta_data.json", Contents: ""}),
|
||||
},
|
||||
{
|
||||
root: "/",
|
||||
files: test.NewMockFilesystem(test.File{Path: "/openstack/latest/meta_data.json", Contents: `{"ignore": "me"}`}),
|
||||
},
|
||||
{
|
||||
root: "/",
|
||||
files: test.NewMockFilesystem(test.File{Path: "/openstack/latest/meta_data.json", Contents: `{"hostname": "host"}`}),
|
||||
metadata: datasource.Metadata{Hostname: "host"},
|
||||
},
|
||||
{
|
||||
root: "/media/configdrive",
|
||||
files: test.NewMockFilesystem(test.File{Path: "/media/configdrive/openstack/latest/meta_data.json", Contents: `{"hostname": "host", "network_config": {"content_path": "config_file.json"}, "public_keys":{"1": "key1", "2": "key2"}}`},
|
||||
test.File{Path: "/media/configdrive/openstack/config_file.json", Contents: "make it work"},
|
||||
),
|
||||
metadata: datasource.Metadata{
|
||||
Hostname: "host",
|
||||
SSHPublicKeys: map[string]string{
|
||||
"1": "key1",
|
||||
"2": "key2",
|
||||
},
|
||||
},
|
||||
},
|
||||
} {
|
||||
cd := ConfigDrive{tt.root, tt.files.ReadFile, nil, true}
|
||||
metadata, err := cd.FetchMetadata()
|
||||
if err != nil {
|
||||
t.Fatalf("bad error for %+v: want %v, got %q", tt, nil, err)
|
||||
}
|
||||
if !reflect.DeepEqual(tt.metadata, metadata) {
|
||||
t.Fatalf("bad metadata for %+v: want %#v, got %#v", tt, tt.metadata, metadata)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFetchUserdata(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
root string
|
||||
files test.MockFilesystem
|
||||
|
||||
userdata string
|
||||
}{
|
||||
{
|
||||
"/",
|
||||
test.NewMockFilesystem(),
|
||||
"",
|
||||
},
|
||||
{
|
||||
"/",
|
||||
test.NewMockFilesystem(test.File{Path: "/openstack/latest/user_data", Contents: "userdata"}),
|
||||
"userdata",
|
||||
},
|
||||
{
|
||||
"/media/configdrive",
|
||||
test.NewMockFilesystem(test.File{Path: "/media/configdrive/openstack/latest/user_data", Contents: "userdata"}),
|
||||
"userdata",
|
||||
},
|
||||
} {
|
||||
cd := ConfigDrive{tt.root, tt.files.ReadFile, nil, true}
|
||||
userdata, err := cd.FetchUserdata()
|
||||
if err != nil {
|
||||
t.Fatalf("bad error for %+v: want %v, got %q", tt, nil, err)
|
||||
}
|
||||
if string(userdata) != tt.userdata {
|
||||
t.Fatalf("bad userdata for %+v: want %q, got %q", tt, tt.userdata, userdata)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigRoot(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
root string
|
||||
configRoot string
|
||||
}{
|
||||
{
|
||||
"/",
|
||||
"/openstack",
|
||||
},
|
||||
{
|
||||
"/media/configdrive",
|
||||
"/media/configdrive/openstack",
|
||||
},
|
||||
} {
|
||||
cd := ConfigDrive{tt.root, nil, nil, true}
|
||||
if configRoot := cd.ConfigRoot(); configRoot != tt.configRoot {
|
||||
t.Fatalf("bad config root for %q: want %q, got %q", tt, tt.configRoot, configRoot)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewDatasource(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
root string
|
||||
expectRoot string
|
||||
}{
|
||||
{
|
||||
root: "",
|
||||
expectRoot: "",
|
||||
},
|
||||
{
|
||||
root: "/media/configdrive",
|
||||
expectRoot: "/media/configdrive",
|
||||
},
|
||||
} {
|
||||
service := NewDatasource(tt.root)
|
||||
if service.root != tt.expectRoot {
|
||||
t.Fatalf("bad root (%q): want %q, got %q", tt.root, tt.expectRoot, service.root)
|
||||
}
|
||||
}
|
||||
}
|
||||
18  vendor/github.com/coreos/coreos-cloudinit/datasource/datasource.go → config/cloudinit/datasource/datasource.go  Normal file → Executable file
@@ -16,6 +16,8 @@ package datasource
|
||||
|
||||
import (
|
||||
"net"
|
||||
|
||||
"github.com/rancher/os/netconf"
|
||||
)
|
||||
|
||||
type Datasource interface {
|
||||
@@ -25,14 +27,20 @@ type Datasource interface {
|
||||
FetchMetadata() (Metadata, error)
|
||||
FetchUserdata() ([]byte, error)
|
||||
Type() string
|
||||
String() string
|
||||
// Finish gives the datasource the opportunity to clean up, unmount or release any open or cached resources
|
||||
Finish() error
|
||||
}
|
||||
|
||||
type Metadata struct {
|
||||
PublicIPv4 net.IP
|
||||
PublicIPv6 net.IP
|
||||
PrivateIPv4 net.IP
|
||||
PrivateIPv6 net.IP
|
||||
// TODO: move to netconf/types.go ?
|
||||
// see https://ahmetalpbalkan.com/blog/comparison-of-instance-metadata-services/
|
||||
Hostname string
|
||||
SSHPublicKeys map[string]string
|
||||
NetworkConfig interface{}
|
||||
NetworkConfig netconf.NetworkConfig
|
||||
|
||||
PublicIPv4 net.IP
|
||||
PublicIPv6 net.IP
|
||||
PrivateIPv4 net.IP
|
||||
PrivateIPv6 net.IP
|
||||
}
|
||||
36  vendor/github.com/coreos/coreos-cloudinit/datasource/file/file.go → config/cloudinit/datasource/file/file.go  Normal file → Executable file
@@ -15,41 +15,51 @@
|
||||
package file
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"github.com/coreos/coreos-cloudinit/datasource"
|
||||
"github.com/rancher/os/config/cloudinit/datasource"
|
||||
)
|
||||
|
||||
type localFile struct {
|
||||
path string
|
||||
type LocalFile struct {
|
||||
path string
|
||||
lastError error
|
||||
}
|
||||
|
||||
func NewDatasource(path string) *localFile {
|
||||
return &localFile{path}
|
||||
func NewDatasource(path string) *LocalFile {
|
||||
return &LocalFile{path, nil}
|
||||
}
|
||||
|
||||
func (f *localFile) IsAvailable() bool {
|
||||
_, err := os.Stat(f.path)
|
||||
return !os.IsNotExist(err)
|
||||
func (f *LocalFile) IsAvailable() bool {
|
||||
_, f.lastError = os.Stat(f.path)
|
||||
return !os.IsNotExist(f.lastError)
|
||||
}
|
||||
|
||||
func (f *localFile) AvailabilityChanges() bool {
|
||||
func (f *LocalFile) Finish() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *LocalFile) String() string {
|
||||
return fmt.Sprintf("%s: %s (lastError: %s)", f.Type(), f.path, f.lastError)
|
||||
}
|
||||
|
||||
func (f *LocalFile) AvailabilityChanges() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (f *localFile) ConfigRoot() string {
|
||||
func (f *LocalFile) ConfigRoot() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (f *localFile) FetchMetadata() (datasource.Metadata, error) {
|
||||
func (f *LocalFile) FetchMetadata() (datasource.Metadata, error) {
|
||||
return datasource.Metadata{}, nil
|
||||
}
|
||||
|
||||
func (f *localFile) FetchUserdata() ([]byte, error) {
|
||||
func (f *LocalFile) FetchUserdata() ([]byte, error) {
|
||||
return ioutil.ReadFile(f.path)
|
||||
}
|
||||
|
||||
func (f *localFile) Type() string {
|
||||
func (f *LocalFile) Type() string {
|
||||
return "local-file"
|
||||
}
|
||||
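The renamed LocalFile type above keeps the same Datasource contract; a hedged usage sketch follows (the file path is purely illustrative):

```go
package main

import (
	"fmt"

	"github.com/rancher/os/config/cloudinit/datasource/file"
)

func main() {
	// Probe availability first, then fetch; Finish is part of the interface
	// even though a plain file has nothing to release.
	ds := file.NewDatasource("/var/lib/rancher/conf/cloud-config.yml") // illustrative path
	if !ds.IsAvailable() {
		fmt.Println("not available:", ds.String())
		return
	}
	defer ds.Finish()

	userdata, err := ds.FetchUserdata()
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d bytes of userdata via %s\n", len(userdata), ds.Type())
}
```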
195  config/cloudinit/datasource/metadata/digitalocean/metadata.go  Executable file
@@ -0,0 +1,195 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package digitalocean
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/rancher/os/netconf"
|
||||
|
||||
"net"
|
||||
|
||||
"github.com/rancher/os/config/cloudinit/datasource"
|
||||
"github.com/rancher/os/config/cloudinit/datasource/metadata"
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultAddress = "http://169.254.169.254/"
|
||||
apiVersion = "metadata/v1"
|
||||
userdataURL = apiVersion + "/user-data"
|
||||
metadataPath = apiVersion + ".json"
|
||||
)
|
||||
|
||||
type Address struct {
|
||||
IPAddress string `json:"ip_address"`
|
||||
Netmask string `json:"netmask"`
|
||||
Cidr int `json:"cidr"`
|
||||
Gateway string `json:"gateway"`
|
||||
}
|
||||
|
||||
type Interface struct {
|
||||
IPv4 *Address `json:"ipv4"`
|
||||
IPv6 *Address `json:"ipv6"`
|
||||
AnchorIPv4 *Address `json:"anchor_ipv4"`
|
||||
MAC string `json:"mac"`
|
||||
Type string `json:"type"`
|
||||
}
|
||||
|
||||
type Interfaces struct {
|
||||
Public []Interface `json:"public"`
|
||||
Private []Interface `json:"private"`
|
||||
}
|
||||
|
||||
type DNS struct {
|
||||
Nameservers []string `json:"nameservers"`
|
||||
}
|
||||
|
||||
type Metadata struct {
|
||||
Hostname string `json:"hostname"`
|
||||
Interfaces Interfaces `json:"interfaces"`
|
||||
PublicKeys []string `json:"public_keys"`
|
||||
DNS DNS `json:"dns"`
|
||||
}
|
||||
|
||||
type MetadataService struct {
|
||||
metadata.Service
|
||||
}
|
||||
|
||||
func NewDatasource(root string) *MetadataService {
|
||||
if root == "" {
|
||||
root = DefaultAddress
|
||||
}
|
||||
return &MetadataService{Service: metadata.NewDatasource(root, apiVersion, userdataURL, metadataPath, nil)}
|
||||
}
|
||||
|
||||
func (ms MetadataService) AvailabilityChanges() bool {
|
||||
// TODO: if it can't find the network, maybe we can start it?
|
||||
return false
|
||||
}
|
||||
|
||||
// Parse IPv4 netmask written in IP form (e.g. "255.255.255.0").
|
||||
func ipmask(addr *Address) string {
|
||||
ip := net.ParseIP(addr.IPAddress)
|
||||
var mask net.IPMask
|
||||
if addr.Netmask != "" {
|
||||
mask = net.IPMask(net.ParseIP(addr.Netmask))
|
||||
} else {
|
||||
mask = net.CIDRMask(addr.Cidr, 32)
|
||||
}
|
||||
ipnet := net.IPNet{
|
||||
IP: ip,
|
||||
Mask: mask,
|
||||
}
|
||||
return ipnet.String()
|
||||
}
|
||||
|
||||
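For the netmask handling in ipmask above, here is a small standalone sketch of the same conversion; the toCIDR helper is illustrative, not part of this package. net.IPNet.String renders the address plus prefix length, which is the "address/bits" form the interface config carries.

```go
package main

import (
	"fmt"
	"net"
)

// toCIDR mirrors what ipmask does: combine an address with either a
// dotted-quad netmask or a prefix length and render it as "ip/bits".
func toCIDR(ip, netmask string, cidr int) string {
	var mask net.IPMask
	if netmask != "" {
		mask = net.IPMask(net.ParseIP(netmask)) // dotted form, e.g. "255.255.255.0"
	} else {
		mask = net.CIDRMask(cidr, 32) // prefix-length form
	}
	return (&net.IPNet{IP: net.ParseIP(ip), Mask: mask}).String()
}

func main() {
	fmt.Println(toCIDR("192.168.1.2", "255.255.255.0", 0)) // 192.168.1.2/24
	fmt.Println(toCIDR("10.0.0.5", "", 16))                // 10.0.0.5/16
}
```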
func (ms *MetadataService) FetchMetadata() (metadata datasource.Metadata, err error) {
|
||||
var data []byte
|
||||
var m Metadata
|
||||
|
||||
if data, err = ms.FetchData(ms.MetadataURL()); err != nil || len(data) == 0 {
|
||||
return
|
||||
}
|
||||
if err = json.Unmarshal(data, &m); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if len(m.Interfaces.Public) > 0 {
|
||||
if m.Interfaces.Public[0].IPv4 != nil {
|
||||
metadata.PublicIPv4 = net.ParseIP(m.Interfaces.Public[0].IPv4.IPAddress)
|
||||
}
|
||||
if m.Interfaces.Public[0].IPv6 != nil {
|
||||
metadata.PublicIPv6 = net.ParseIP(m.Interfaces.Public[0].IPv6.IPAddress)
|
||||
}
|
||||
}
|
||||
if len(m.Interfaces.Private) > 0 {
|
||||
if m.Interfaces.Private[0].IPv4 != nil {
|
||||
metadata.PrivateIPv4 = net.ParseIP(m.Interfaces.Private[0].IPv4.IPAddress)
|
||||
}
|
||||
if m.Interfaces.Private[0].IPv6 != nil {
|
||||
metadata.PrivateIPv6 = net.ParseIP(m.Interfaces.Private[0].IPv6.IPAddress)
|
||||
}
|
||||
}
|
||||
|
||||
metadata.NetworkConfig.Interfaces = make(map[string]netconf.InterfaceConfig)
|
||||
|
||||
ethNumber := 0
|
||||
|
||||
for _, eth := range m.Interfaces.Public {
|
||||
network := netconf.InterfaceConfig{}
|
||||
|
||||
if eth.IPv4 != nil {
|
||||
network.Gateway = eth.IPv4.Gateway
|
||||
|
||||
network.Addresses = append(network.Addresses, ipmask(eth.IPv4))
|
||||
if metadata.PublicIPv4 == nil {
|
||||
metadata.PublicIPv4 = net.ParseIP(eth.IPv4.IPAddress)
|
||||
}
|
||||
}
|
||||
if eth.AnchorIPv4 != nil {
|
||||
network.Addresses = append(network.Addresses, ipmask(eth.AnchorIPv4))
|
||||
}
|
||||
if eth.IPv6 != nil {
|
||||
network.Addresses = append(network.Addresses, fmt.Sprintf("%s/%d", eth.IPv6.IPAddress, eth.IPv6.Cidr))
|
||||
network.GatewayIpv6 = eth.IPv6.Gateway
|
||||
if metadata.PublicIPv6 == nil {
|
||||
metadata.PublicIPv6 = net.ParseIP(eth.IPv6.IPAddress)
|
||||
}
|
||||
}
|
||||
metadata.NetworkConfig.Interfaces[fmt.Sprintf("eth%d", ethNumber)] = network
|
||||
ethNumber = ethNumber + 1
|
||||
}
|
||||
|
||||
for _, eth := range m.Interfaces.Private {
|
||||
network := netconf.InterfaceConfig{}
|
||||
if eth.IPv4 != nil {
|
||||
network.Gateway = eth.IPv4.Gateway
|
||||
|
||||
network.Addresses = append(network.Addresses, ipmask(eth.IPv4))
|
||||
|
||||
if metadata.PrivateIPv4 == nil {
|
||||
metadata.PrivateIPv4 = net.ParseIP(eth.IPv4.IPAddress)
|
||||
}
|
||||
}
|
||||
if eth.AnchorIPv4 != nil {
|
||||
network.Addresses = append(network.Addresses, ipmask(eth.AnchorIPv4))
|
||||
}
|
||||
if eth.IPv6 != nil {
|
||||
network.Addresses = append(network.Addresses, fmt.Sprintf("%s/%d", eth.IPv6.IPAddress, eth.IPv6.Cidr))
|
||||
network.GatewayIpv6 = eth.IPv6.Gateway
|
||||
if metadata.PrivateIPv6 == nil {
|
||||
metadata.PrivateIPv6 = net.ParseIP(eth.IPv6.IPAddress)
|
||||
}
|
||||
}
|
||||
metadata.NetworkConfig.Interfaces[fmt.Sprintf("eth%d", ethNumber)] = network
|
||||
ethNumber = ethNumber + 1
|
||||
}
|
||||
|
||||
metadata.NetworkConfig.DNS.Nameservers = m.DNS.Nameservers
|
||||
|
||||
metadata.Hostname = m.Hostname
|
||||
metadata.SSHPublicKeys = map[string]string{}
|
||||
for i, key := range m.PublicKeys {
|
||||
metadata.SSHPublicKeys[strconv.Itoa(i)] = key
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (ms MetadataService) Type() string {
|
||||
return "digitalocean-metadata-service"
|
||||
}
|
||||
142  config/cloudinit/datasource/metadata/digitalocean/metadata_test.go  Executable file
@@ -0,0 +1,142 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package digitalocean
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/rancher/os/netconf"
|
||||
|
||||
"github.com/rancher/os/config/cloudinit/datasource"
|
||||
"github.com/rancher/os/config/cloudinit/datasource/metadata"
|
||||
"github.com/rancher/os/config/cloudinit/datasource/metadata/test"
|
||||
"github.com/rancher/os/config/cloudinit/pkg"
|
||||
)
|
||||
|
||||
func TestType(t *testing.T) {
|
||||
want := "digitalocean-metadata-service"
|
||||
if kind := (MetadataService{}).Type(); kind != want {
|
||||
t.Fatalf("bad type: want %q, got %q", want, kind)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFetchMetadata(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
root string
|
||||
metadataPath string
|
||||
resources map[string]string
|
||||
expect datasource.Metadata
|
||||
clientErr error
|
||||
expectErr error
|
||||
}{
|
||||
{
|
||||
root: "/",
|
||||
metadataPath: "v1.json",
|
||||
resources: map[string]string{
|
||||
"/v1.json": "bad",
|
||||
},
|
||||
expectErr: fmt.Errorf("invalid character 'b' looking for beginning of value"),
|
||||
},
|
||||
{
|
||||
root: "/",
|
||||
metadataPath: "v1.json",
|
||||
resources: map[string]string{
|
||||
"/v1.json": `{
|
||||
"droplet_id": 1,
|
||||
"user_data": "hello",
|
||||
"vendor_data": "hello",
|
||||
"public_keys": [
|
||||
"publickey1",
|
||||
"publickey2"
|
||||
],
|
||||
"region": "nyc2",
|
||||
"interfaces": {
|
||||
"public": [
|
||||
{
|
||||
"ipv4": {
|
||||
"ip_address": "192.168.1.2",
|
||||
"netmask": "255.255.255.0",
|
||||
"gateway": "192.168.1.1"
|
||||
},
|
||||
"ipv6": {
|
||||
"ip_address": "fe00::",
|
||||
"cidr": 126,
|
||||
"gateway": "fe00::"
|
||||
},
|
||||
"mac": "ab:cd:ef:gh:ij",
|
||||
"type": "public"
|
||||
}
|
||||
]
|
||||
}
|
||||
}`,
|
||||
},
|
||||
expect: datasource.Metadata{
|
||||
PublicIPv4: net.ParseIP("192.168.1.2"),
|
||||
PublicIPv6: net.ParseIP("fe00::"),
|
||||
SSHPublicKeys: map[string]string{
|
||||
"0": "publickey1",
|
||||
"1": "publickey2",
|
||||
},
|
||||
NetworkConfig: netconf.NetworkConfig{
|
||||
Interfaces: map[string]netconf.InterfaceConfig{
|
||||
"eth0": netconf.InterfaceConfig{
|
||||
Addresses: []string{
|
||||
"192.168.1.2/24",
|
||||
"fe00::/126",
|
||||
},
|
||||
//Netmask: "255.255.255.0",
|
||||
Gateway: "192.168.1.1",
|
||||
|
||||
//Cidr: 126,
|
||||
GatewayIpv6: "fe00::",
|
||||
//MAC: "ab:cd:ef:gh:ij",
|
||||
//Type: "public",
|
||||
},
|
||||
},
|
||||
//PublicKeys: []string{"publickey1", "publickey2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
clientErr: pkg.ErrTimeout{Err: fmt.Errorf("test error")},
|
||||
expectErr: pkg.ErrTimeout{Err: fmt.Errorf("test error")},
|
||||
},
|
||||
} {
|
||||
service := &MetadataService{
|
||||
Service: metadata.Service{
|
||||
Root: tt.root,
|
||||
Client: &test.HTTPClient{Resources: tt.resources, Err: tt.clientErr},
|
||||
MetadataPath: tt.metadataPath,
|
||||
},
|
||||
}
|
||||
metadata, err := service.FetchMetadata()
|
||||
if Error(err) != Error(tt.expectErr) {
|
||||
t.Fatalf("bad error (%q): \nwant %#v,\n got %#v", tt.resources, tt.expectErr, err)
|
||||
}
|
||||
if !reflect.DeepEqual(tt.expect, metadata) {
|
||||
t.Fatalf("bad fetch (%q): \nwant %#v,\n got %#v", tt.resources, tt.expect, metadata)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func Error(err error) string {
|
||||
if err != nil {
|
||||
return err.Error()
|
||||
}
|
||||
return ""
|
||||
}
|
||||
169  config/cloudinit/datasource/metadata/ec2/metadata.go  Executable file
@@ -0,0 +1,169 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package ec2
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"strings"
|
||||
|
||||
"github.com/rancher/os/netconf"
|
||||
|
||||
"github.com/rancher/os/config/cloudinit/datasource"
|
||||
"github.com/rancher/os/config/cloudinit/datasource/metadata"
|
||||
"github.com/rancher/os/config/cloudinit/pkg"
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultAddress = "http://169.254.169.254/"
|
||||
apiVersion = "latest/"
|
||||
userdataPath = apiVersion + "user-data/"
|
||||
metadataPath = apiVersion + "meta-data/"
|
||||
)
|
||||
|
||||
type MetadataService struct {
|
||||
metadata.Service
|
||||
}
|
||||
|
||||
func NewDatasource(root string) *MetadataService {
|
||||
if root == "" {
|
||||
root = DefaultAddress
|
||||
}
|
||||
return &MetadataService{metadata.NewDatasource(root, apiVersion, userdataPath, metadataPath, nil)}
|
||||
}
|
||||
|
||||
func (ms MetadataService) AvailabilityChanges() bool {
|
||||
// TODO: if it can't find the network, maybe we can start it?
|
||||
return false
|
||||
}
|
||||
|
||||
func (ms MetadataService) FetchMetadata() (datasource.Metadata, error) {
|
||||
// see http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html
|
||||
metadata := datasource.Metadata{}
|
||||
metadata.NetworkConfig = netconf.NetworkConfig{}
|
||||
|
||||
if keynames, err := ms.fetchAttributes("public-keys"); err == nil {
|
||||
keyIDs := make(map[string]string)
|
||||
for _, keyname := range keynames {
|
||||
tokens := strings.SplitN(keyname, "=", 2)
|
||||
if len(tokens) != 2 {
|
||||
return metadata, fmt.Errorf("malformed public key: %q", keyname)
|
||||
}
|
||||
keyIDs[tokens[1]] = tokens[0]
|
||||
}
|
||||
|
||||
metadata.SSHPublicKeys = map[string]string{}
|
||||
for name, id := range keyIDs {
|
||||
sshkey, err := ms.fetchAttribute(fmt.Sprintf("public-keys/%s/openssh-key", id))
|
||||
if err != nil {
|
||||
return metadata, err
|
||||
}
|
||||
metadata.SSHPublicKeys[name] = sshkey
|
||||
log.Printf("Found SSH key for %q\n", name)
|
||||
}
|
||||
} else if _, ok := err.(pkg.ErrNotFound); !ok {
|
||||
return metadata, err
|
||||
}
|
||||
|
||||
if hostname, err := ms.fetchAttribute("hostname"); err == nil {
|
||||
metadata.Hostname = strings.Split(hostname, " ")[0]
|
||||
} else if _, ok := err.(pkg.ErrNotFound); !ok {
|
||||
return metadata, err
|
||||
}
|
||||
|
||||
// TODO: these are only on the first interface - it looks like you can have as many as you need...
|
||||
if localAddr, err := ms.fetchAttribute("local-ipv4"); err == nil {
|
||||
metadata.PrivateIPv4 = net.ParseIP(localAddr)
|
||||
} else if _, ok := err.(pkg.ErrNotFound); !ok {
|
||||
return metadata, err
|
||||
}
|
||||
if publicAddr, err := ms.fetchAttribute("public-ipv4"); err == nil {
|
||||
metadata.PublicIPv4 = net.ParseIP(publicAddr)
|
||||
} else if _, ok := err.(pkg.ErrNotFound); !ok {
|
||||
return metadata, err
|
||||
}
|
||||
|
||||
metadata.NetworkConfig.Interfaces = make(map[string]netconf.InterfaceConfig)
|
||||
if macs, err := ms.fetchAttributes("network/interfaces/macs"); err == nil {
|
||||
for _, mac := range macs {
|
||||
if deviceNumber, err := ms.fetchAttribute(fmt.Sprintf("network/interfaces/macs/%s/device-number", mac)); err == nil {
|
||||
network := netconf.InterfaceConfig{
|
||||
DHCP: true,
|
||||
}
|
||||
/* Looks like we must use DHCP for aws
|
||||
// private ipv4
|
||||
if subnetCidrBlock, err := ms.fetchAttribute(fmt.Sprintf("network/interfaces/macs/%s/subnet-ipv4-cidr-block", mac)); err != nil {
|
||||
cidr := strings.Split(subnetCidrBlock, "/")
|
||||
if localAddr, err := ms.fetchAttributes(fmt.Sprintf("network/interfaces/macs/%s/local-ipv4s", mac)); err != nil {
|
||||
for _, addr := range localAddr {
|
||||
network.Addresses = append(network.Addresses, addr+"/"+cidr[1])
|
||||
}
|
||||
}
|
||||
}
|
||||
// ipv6
|
||||
if localAddr, err := ms.fetchAttributes(fmt.Sprintf("network/interfaces/macs/%s/ipv6s", mac)); err != nil {
|
||||
if subnetCidrBlock, err := ms.fetchAttributes(fmt.Sprintf("network/interfaces/macs/%s/subnet-ipv6-cidr-block", mac)); err != nil {
|
||||
for i, addr := range localAddr {
|
||||
cidr := strings.Split(subnetCidrBlock[i], "/")
|
||||
network.Addresses = append(network.Addresses, addr+"/"+cidr[1])
|
||||
}
|
||||
}
|
||||
}
|
||||
*/
|
||||
// disabled - it looks to me like you don't actually put the public IP on the eth device
|
||||
/* if publicAddr, err := ms.fetchAttributes(fmt.Sprintf("network/interfaces/macs/%s/public-ipv4s", mac)); err != nil {
|
||||
if vpcCidrBlock, err := ms.fetchAttribute(fmt.Sprintf("network/interfaces/macs/%s/vpc-ipv4-cidr-block", mac)); err != nil {
|
||||
cidr := strings.Split(vpcCidrBlock, "/")
|
||||
network.Addresses = append(network.Addresses, publicAddr+"/"+cidr[1])
|
||||
}
|
||||
}
|
||||
*/
|
||||
|
||||
metadata.NetworkConfig.Interfaces["eth"+deviceNumber] = network
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return metadata, nil
|
||||
}
|
||||
|
||||
func (ms MetadataService) Type() string {
|
||||
return "ec2-metadata-service"
|
||||
}
|
||||
|
||||
func (ms MetadataService) fetchAttributes(key string) ([]string, error) {
|
||||
url := ms.MetadataURL() + key
|
||||
resp, err := ms.FetchData(url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
scanner := bufio.NewScanner(bytes.NewBuffer(resp))
|
||||
data := make([]string, 0)
|
||||
for scanner.Scan() {
|
||||
data = append(data, scanner.Text())
|
||||
}
|
||||
return data, scanner.Err()
|
||||
}
|
||||
|
||||
func (ms MetadataService) fetchAttribute(key string) (string, error) {
|
||||
attrs, err := ms.fetchAttributes(key)
|
||||
if err == nil && len(attrs) > 0 {
|
||||
return attrs[0], nil
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
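The metadata tree that fetchAttributes walks is plain text: each response lists one entry per line, and a trailing "/" marks a subtree. A tiny self-contained sketch of that convention (the mock map is illustrative, echoing the test resources in the file that follows):

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

// mock EC2-style metadata responses: one entry per line, "/" suffix = subtree.
var tree = map[string]string{
	"/":            "hostname\npublic-keys/",
	"/public-keys": "0=test1",
}

// listAttributes splits a response line by line, as fetchAttributes does
// with bufio.Scanner.
func listAttributes(body string) []string {
	var out []string
	scanner := bufio.NewScanner(strings.NewReader(body))
	for scanner.Scan() {
		out = append(out, scanner.Text())
	}
	return out
}

func main() {
	fmt.Println(listAttributes(tree["/"]))            // [hostname public-keys/]
	fmt.Println(listAttributes(tree["/public-keys"])) // [0=test1] -> index 0, key name "test1"
}
```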
243  config/cloudinit/datasource/metadata/ec2/metadata_test.go  Executable file
@@ -0,0 +1,243 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package ec2
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/rancher/os/config/cloudinit/datasource"
|
||||
"github.com/rancher/os/config/cloudinit/datasource/metadata"
|
||||
"github.com/rancher/os/config/cloudinit/datasource/metadata/test"
|
||||
"github.com/rancher/os/config/cloudinit/pkg"
|
||||
"github.com/rancher/os/netconf"
|
||||
)
|
||||
|
||||
func TestType(t *testing.T) {
|
||||
want := "ec2-metadata-service"
|
||||
if kind := (MetadataService{}).Type(); kind != want {
|
||||
t.Fatalf("bad type: want %q, got %q", want, kind)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFetchAttributes(t *testing.T) {
|
||||
for _, s := range []struct {
|
||||
resources map[string]string
|
||||
err error
|
||||
tests []struct {
|
||||
path string
|
||||
val []string
|
||||
}
|
||||
}{
|
||||
{
|
||||
resources: map[string]string{
|
||||
"/": "a\nb\nc/",
|
||||
"/c/": "d\ne/",
|
||||
"/c/e/": "f",
|
||||
"/a": "1",
|
||||
"/b": "2",
|
||||
"/c/d": "3",
|
||||
"/c/e/f": "4",
|
||||
},
|
||||
tests: []struct {
|
||||
path string
|
||||
val []string
|
||||
}{
|
||||
{"/", []string{"a", "b", "c/"}},
|
||||
{"/b", []string{"2"}},
|
||||
{"/c/d", []string{"3"}},
|
||||
{"/c/e/", []string{"f"}},
|
||||
},
|
||||
},
|
||||
{
|
||||
err: fmt.Errorf("test error"),
|
||||
tests: []struct {
|
||||
path string
|
||||
val []string
|
||||
}{
|
||||
{"", nil},
|
||||
},
|
||||
},
|
||||
} {
|
||||
service := MetadataService{metadata.Service{
|
||||
Client: &test.HTTPClient{Resources: s.resources, Err: s.err},
|
||||
}}
|
||||
for _, tt := range s.tests {
|
||||
attrs, err := service.fetchAttributes(tt.path)
|
||||
if err != s.err {
|
||||
t.Fatalf("bad error for %q (%q): want %q, got %q", tt.path, s.resources, s.err, err)
|
||||
}
|
||||
if !reflect.DeepEqual(attrs, tt.val) {
|
||||
t.Fatalf("bad fetch for %q (%q): want %q, got %q", tt.path, s.resources, tt.val, attrs)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFetchAttribute(t *testing.T) {
|
||||
for _, s := range []struct {
|
||||
resources map[string]string
|
||||
err error
|
||||
tests []struct {
|
||||
path string
|
||||
val string
|
||||
}
|
||||
}{
|
||||
{
|
||||
resources: map[string]string{
|
||||
"/": "a\nb\nc/",
|
||||
"/c/": "d\ne/",
|
||||
"/c/e/": "f",
|
||||
"/a": "1",
|
||||
"/b": "2",
|
||||
"/c/d": "3",
|
||||
"/c/e/f": "4",
|
||||
},
|
||||
tests: []struct {
|
||||
path string
|
||||
val string
|
||||
}{
|
||||
{"/a", "1"},
|
||||
{"/b", "2"},
|
||||
{"/c/d", "3"},
|
||||
{"/c/e/f", "4"},
|
||||
},
|
||||
},
|
||||
{
|
||||
err: fmt.Errorf("test error"),
|
||||
tests: []struct {
|
||||
path string
|
||||
val string
|
||||
}{
|
||||
{"", ""},
|
||||
},
|
||||
},
|
||||
} {
|
||||
service := MetadataService{metadata.Service{
|
||||
Client: &test.HTTPClient{Resources: s.resources, Err: s.err},
|
||||
}}
|
||||
for _, tt := range s.tests {
|
||||
attr, err := service.fetchAttribute(tt.path)
|
||||
if err != s.err {
|
||||
t.Fatalf("bad error for %q (%q): want %q, got %q", tt.path, s.resources, s.err, err)
|
||||
}
|
||||
if attr != tt.val {
|
||||
t.Fatalf("bad fetch for %q (%q): want %q, got %q", tt.path, s.resources, tt.val, attr)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFetchMetadata(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
root string
|
||||
metadataPath string
|
||||
resources map[string]string
|
||||
expect datasource.Metadata
|
||||
clientErr error
|
||||
expectErr error
|
||||
}{
|
||||
{
|
||||
root: "/",
|
||||
metadataPath: "2009-04-04/meta-data/",
|
||||
resources: map[string]string{
|
||||
"/2009-04-04/meta-data/public-keys": "bad\n",
|
||||
},
|
||||
expectErr: fmt.Errorf("malformed public key: \"bad\""),
|
||||
},
|
||||
{
|
||||
root: "/",
|
||||
metadataPath: "2009-04-04/meta-data/",
|
||||
resources: map[string]string{
|
||||
"/2009-04-04/meta-data/hostname": "host",
|
||||
"/2009-04-04/meta-data/local-ipv4": "1.2.3.4",
|
||||
"/2009-04-04/meta-data/public-ipv4": "5.6.7.8",
|
||||
"/2009-04-04/meta-data/public-keys": "0=test1\n",
|
||||
"/2009-04-04/meta-data/public-keys/0": "openssh-key",
|
||||
"/2009-04-04/meta-data/public-keys/0/openssh-key": "key",
|
||||
},
|
||||
expect: datasource.Metadata{
|
||||
Hostname: "host",
|
||||
PrivateIPv4: net.ParseIP("1.2.3.4"),
|
||||
PublicIPv4: net.ParseIP("5.6.7.8"),
|
||||
SSHPublicKeys: map[string]string{"test1": "key"},
|
||||
NetworkConfig: netconf.NetworkConfig{
|
||||
Interfaces: map[string]netconf.InterfaceConfig{
|
||||
/* "eth0": netconf.InterfaceConfig{
|
||||
Addresses: []string{
|
||||
"1.2.3.4",
|
||||
"5.6.7.8",
|
||||
},
|
||||
},
|
||||
*/},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
root: "/",
|
||||
metadataPath: "2009-04-04/meta-data/",
|
||||
resources: map[string]string{
|
||||
"/2009-04-04/meta-data/hostname": "host domain another_domain",
|
||||
"/2009-04-04/meta-data/local-ipv4": "21.2.3.4",
|
||||
"/2009-04-04/meta-data/public-ipv4": "25.6.7.8",
|
||||
"/2009-04-04/meta-data/public-keys": "0=test1\n",
|
||||
"/2009-04-04/meta-data/public-keys/0": "openssh-key",
|
||||
"/2009-04-04/meta-data/public-keys/0/openssh-key": "key",
|
||||
},
|
||||
expect: datasource.Metadata{
|
||||
Hostname: "host",
|
||||
PrivateIPv4: net.ParseIP("21.2.3.4"),
|
||||
PublicIPv4: net.ParseIP("25.6.7.8"),
|
||||
SSHPublicKeys: map[string]string{"test1": "key"},
|
||||
NetworkConfig: netconf.NetworkConfig{
|
||||
Interfaces: map[string]netconf.InterfaceConfig{
|
||||
/* "eth0": netconf.InterfaceConfig{
|
||||
Addresses: []string{
|
||||
"1.2.3.4",
|
||||
"5.6.7.8",
|
||||
},
|
||||
},
|
||||
*/},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
clientErr: pkg.ErrTimeout{Err: fmt.Errorf("test error")},
|
||||
expectErr: pkg.ErrTimeout{Err: fmt.Errorf("test error")},
|
||||
},
|
||||
} {
|
||||
service := &MetadataService{metadata.Service{
|
||||
Root: tt.root,
|
||||
Client: &test.HTTPClient{Resources: tt.resources, Err: tt.clientErr},
|
||||
MetadataPath: tt.metadataPath,
|
||||
}}
|
||||
metadata, err := service.FetchMetadata()
|
||||
if Error(err) != Error(tt.expectErr) {
|
||||
t.Fatalf("bad error (%q): \nwant %q, \ngot %q\n", tt.resources, tt.expectErr, err)
|
||||
}
|
||||
if !reflect.DeepEqual(tt.expect, metadata) {
|
||||
t.Fatalf("bad fetch (%q): \nwant %#v, \ngot %#v\n", tt.resources, tt.expect, metadata)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func Error(err error) string {
|
||||
if err != nil {
|
||||
return err.Error()
|
||||
}
|
||||
return ""
|
||||
}
|
||||
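The test tables above pin down the EC2 public-keys layout: the index listing returns lines like "0=test1", the key material lives under public-keys/<index>/openssh-key, and a line without "=" is rejected as malformed. The following is an illustrative standalone sketch of just that index-line parsing, not the datasource's actual FetchMetadata code:

package main

import (
	"fmt"
	"strings"
)

// parsePublicKeyLine handles one line of .../meta-data/public-keys, which the
// tests above expect in the form "0=test1"; anything without '=' is malformed.
func parsePublicKeyLine(line string) (index, name string, err error) {
	tokens := strings.SplitN(line, "=", 2)
	if len(tokens) != 2 {
		return "", "", fmt.Errorf("malformed public key: %q", line)
	}
	return tokens[0], tokens[1], nil
}

func main() {
	if idx, name, err := parsePublicKeyLine("0=test1"); err == nil {
		// The key material itself is then fetched from public-keys/<idx>/openssh-key.
		fmt.Printf("fetch public-keys/%s/openssh-key for %q\n", idx, name)
	}
	if _, _, err := parsePublicKeyLine("bad"); err != nil {
		fmt.Println(err) // matches expectErr in the first FetchMetadata test case
	}
}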
63
cmd/cloudinitsave/gce/metadata.go → config/cloudinit/datasource/metadata/gce/metadata.go
Normal file → Executable file
@@ -21,21 +21,27 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/coreos/coreos-cloudinit/datasource"
|
||||
"github.com/coreos/coreos-cloudinit/datasource/metadata"
|
||||
//"github.com/rancher/os/netconf"
|
||||
|
||||
"github.com/rancher/os/config/cloudinit/datasource"
|
||||
"github.com/rancher/os/config/cloudinit/datasource/metadata"
|
||||
)
|
||||
|
||||
const (
|
||||
apiVersion = "computeMetadata/v1/"
|
||||
metadataPath = apiVersion
|
||||
userdataPath = apiVersion + "instance/attributes/user-data"
|
||||
DefaultAddress = "http://metadata.google.internal/"
|
||||
apiVersion = "computeMetadata/v1/"
|
||||
metadataPath = apiVersion
|
||||
userdataPath = apiVersion + "instance/attributes/user-data"
|
||||
)
|
||||
|
||||
type MetadataService struct {
|
||||
metadata.MetadataService
|
||||
metadata.Service
|
||||
}
|
||||
|
||||
func NewDatasource(root string) *MetadataService {
|
||||
if root == "" {
|
||||
root = DefaultAddress
|
||||
}
|
||||
return &MetadataService{metadata.NewDatasource(root, apiVersion, userdataPath, metadataPath, http.Header{"Metadata-Flavor": {"Google"}})}
|
||||
}
|
||||
|
||||
@@ -61,28 +67,49 @@ func (ms MetadataService) FetchMetadata() (datasource.Metadata, error) {
|
||||
if err != nil {
|
||||
return datasource.Metadata{}, err
|
||||
}
|
||||
md := datasource.Metadata{
|
||||
PublicIPv4: public,
|
||||
PrivateIPv4: local,
|
||||
Hostname: hostname,
|
||||
SSHPublicKeys: nil,
|
||||
}
|
||||
|
||||
/* Disabled, using DHCP like in pre-0.9.1 - missing gateway and netmask, and testing time
|
||||
addresses := []string{}
|
||||
if public != nil {
|
||||
addresses = append(addresses, public.String())
|
||||
}
|
||||
if local != nil {
|
||||
addresses = append(addresses, local.String())
|
||||
}
|
||||
if len(addresses) > 0 {
|
||||
network := netconf.InterfaceConfig{
|
||||
Addresses: addresses,
|
||||
}
|
||||
|
||||
md.NetworkConfig.Interfaces = make(map[string]netconf.InterfaceConfig)
|
||||
md.NetworkConfig.Interfaces["eth0"] = network
|
||||
}
|
||||
*/
|
||||
|
||||
keyStrings := strings.Split(projectSSHKeys+"\n"+instanceSSHKeys, "\n")
|
||||
|
||||
sshPublicKeys := map[string]string{}
|
||||
i := 0
|
||||
for _, keyString := range keyStrings {
|
||||
keySlice := strings.SplitN(keyString, ":", 2)
|
||||
if len(keySlice) == 2 {
|
||||
key := strings.TrimSpace(keySlice[1])
|
||||
if key != "" {
|
||||
sshPublicKeys[strconv.Itoa(i)] = strings.TrimSpace(keySlice[1])
|
||||
if md.SSHPublicKeys == nil {
|
||||
md.SSHPublicKeys = map[string]string{}
|
||||
}
|
||||
md.SSHPublicKeys[strconv.Itoa(i)] = strings.TrimSpace(keySlice[1])
|
||||
i++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return datasource.Metadata{
|
||||
PublicIPv4: public,
|
||||
PrivateIPv4: local,
|
||||
Hostname: hostname,
|
||||
SSHPublicKeys: sshPublicKeys,
|
||||
}, nil
|
||||
return md, nil
|
||||
}
|
||||
|
||||
func (ms MetadataService) Type() string {
|
||||
@@ -90,7 +117,7 @@ func (ms MetadataService) Type() string {
|
||||
}
|
||||
|
||||
func (ms MetadataService) fetchString(key string) (string, error) {
|
||||
data, err := ms.FetchData(ms.MetadataUrl() + key)
|
||||
data, err := ms.FetchData(ms.MetadataURL() + key)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@@ -115,12 +142,14 @@ func (ms MetadataService) fetchIP(key string) (net.IP, error) {
|
||||
}
|
||||
|
||||
func (ms MetadataService) FetchUserdata() ([]byte, error) {
|
||||
data, err := ms.FetchData(ms.UserdataUrl())
|
||||
// see https://github.com/number5/cloud-init/blob/master/cloudinit/sources/DataSourceGCE.py
|
||||
data, err := ms.FetchData(ms.UserdataURL())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(data) == 0 {
|
||||
data, err = ms.FetchData(ms.MetadataUrl() + "instance/attributes/startup-script")
|
||||
// see https://cloud.google.com/deployment-manager/docs/step-by-step-guide/setting-metadata-and-startup-scripts
|
||||
data, err = ms.FetchData(ms.MetadataURL() + "instance/attributes/startup-script")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
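The hunk above concatenates the project-level and instance-level SSH key attributes and turns each "account:key" line into a numbered SSHPublicKeys entry. A self-contained rerun of that loop with made-up key strings (the key values are assumptions; the splitting logic mirrors the diff):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Hypothetical example values, standing in for the project- and
	// instance-level SSH key attributes the datasource fetches.
	projectSSHKeys := "core:ssh-rsa AAAA... core"
	instanceSSHKeys := "rancher:ssh-rsa BBBB... rancher"

	sshPublicKeys := map[string]string{}
	i := 0
	for _, keyString := range strings.Split(projectSSHKeys+"\n"+instanceSSHKeys, "\n") {
		keySlice := strings.SplitN(keyString, ":", 2) // "account:key material"
		if len(keySlice) == 2 {
			if key := strings.TrimSpace(keySlice[1]); key != "" {
				sshPublicKeys[strconv.Itoa(i)] = key // the diff fills md.SSHPublicKeys lazily instead
				i++
			}
		}
	}
	fmt.Println(sshPublicKeys)
}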
116
config/cloudinit/datasource/metadata/gce/metadata_test.go
Executable file
@@ -0,0 +1,116 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package gce
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
//"github.com/rancher/os/netconf"
|
||||
|
||||
"github.com/rancher/os/config/cloudinit/datasource"
|
||||
"github.com/rancher/os/config/cloudinit/datasource/metadata"
|
||||
"github.com/rancher/os/config/cloudinit/datasource/metadata/test"
|
||||
"github.com/rancher/os/config/cloudinit/pkg"
|
||||
)
|
||||
|
||||
func TestType(t *testing.T) {
|
||||
want := "gce-metadata-service"
|
||||
if kind := (MetadataService{}).Type(); kind != want {
|
||||
t.Fatalf("bad type: want %q, got %q", want, kind)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFetchMetadata(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
testName string
|
||||
root string
|
||||
metadataPath string
|
||||
resources map[string]string
|
||||
expect datasource.Metadata
|
||||
clientErr error
|
||||
expectErr error
|
||||
}{
|
||||
{
|
||||
testName: "one",
|
||||
root: "/",
|
||||
metadataPath: "computeMetadata/v1/",
|
||||
resources: map[string]string{},
|
||||
},
|
||||
{
|
||||
testName: "two",
|
||||
root: "/",
|
||||
metadataPath: "computeMetadata/v1/",
|
||||
resources: map[string]string{
|
||||
"/computeMetadata/v1/instance/hostname": "host",
|
||||
},
|
||||
expect: datasource.Metadata{
|
||||
Hostname: "host",
|
||||
},
|
||||
},
|
||||
{
|
||||
testName: "three",
|
||||
root: "/",
|
||||
metadataPath: "computeMetadata/v1/",
|
||||
resources: map[string]string{
|
||||
"/computeMetadata/v1/instance/hostname": "host",
|
||||
"/computeMetadata/v1/instance/network-interfaces/0/ip": "1.2.3.4",
|
||||
"/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip": "5.6.7.8",
|
||||
},
|
||||
expect: datasource.Metadata{
|
||||
Hostname: "host",
|
||||
PrivateIPv4: net.ParseIP("1.2.3.4"),
|
||||
PublicIPv4: net.ParseIP("5.6.7.8"),
|
||||
// NetworkConfig: netconf.NetworkConfig{
|
||||
// Interfaces: map[string]netconf.InterfaceConfig{
|
||||
// "eth0": netconf.InterfaceConfig{
|
||||
// Addresses: []string{
|
||||
// "5.6.7.8",
|
||||
// "1.2.3.4",
|
||||
// },
|
||||
// },
|
||||
// },
|
||||
// },
|
||||
},
|
||||
},
|
||||
{
|
||||
testName: "four",
|
||||
clientErr: pkg.ErrTimeout{Err: fmt.Errorf("test error")},
|
||||
expectErr: pkg.ErrTimeout{Err: fmt.Errorf("test error")},
|
||||
},
|
||||
} {
|
||||
service := &MetadataService{metadata.Service{
|
||||
Root: tt.root,
|
||||
Client: &test.HTTPClient{Resources: tt.resources, Err: tt.clientErr},
|
||||
MetadataPath: tt.metadataPath,
|
||||
}}
|
||||
metadata, err := service.FetchMetadata()
|
||||
if Error(err) != Error(tt.expectErr) {
|
||||
t.Fatalf("bad error (%q): want \n%q\n, got \n%q\n", tt.resources, tt.expectErr, err)
|
||||
}
|
||||
if !reflect.DeepEqual(tt.expect, metadata) {
|
||||
t.Fatalf("bad fetch %s(%q): want \n%#v\n, got \n%#v\n", tt.testName, tt.resources, tt.expect, metadata)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func Error(err error) string {
|
||||
if err != nil {
|
||||
return err.Error()
|
||||
}
|
||||
return ""
|
||||
}
|
||||
44
vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/metadata.go → config/cloudinit/datasource/metadata/metadata.go
Normal file → Executable file
@@ -15,45 +15,59 @@
package metadata

import (
	"fmt"
	"net/http"
	"strings"

	"github.com/coreos/coreos-cloudinit/pkg"
	"github.com/rancher/os/config/cloudinit/pkg"
	"github.com/rancher/os/log"
)

type MetadataService struct {
type Service struct {
	Root         string
	Client       pkg.Getter
	ApiVersion   string
	APIVersion   string
	UserdataPath string
	MetadataPath string
	lastError    error
}

func NewDatasource(root, apiVersion, userdataPath, metadataPath string, header http.Header) MetadataService {
func NewDatasource(root, apiVersion, userdataPath, metadataPath string, header http.Header) Service {
	if !strings.HasSuffix(root, "/") {
		root += "/"
	}
	return MetadataService{root, pkg.NewHttpClientHeader(header), apiVersion, userdataPath, metadataPath}
	return Service{root, pkg.NewHTTPClientHeader(header), apiVersion, userdataPath, metadataPath, nil}
}

func (ms MetadataService) IsAvailable() bool {
	_, err := ms.Client.Get(ms.Root + ms.ApiVersion)
	return (err == nil)
func (ms Service) IsAvailable() bool {
	_, ms.lastError = ms.Client.Get(ms.Root + ms.APIVersion)
	if ms.lastError != nil {
		log.Errorf("%s: %s (lastError: %s)", "IsAvailable", ms.Root+":"+ms.UserdataPath, ms.lastError)
	}
	return (ms.lastError == nil)
}

func (ms MetadataService) AvailabilityChanges() bool {
func (ms *Service) Finish() error {
	return nil
}

func (ms *Service) String() string {
	return fmt.Sprintf("%s: %s (lastError: %s)", "metadata", ms.Root+ms.UserdataPath, ms.lastError)
}

func (ms Service) AvailabilityChanges() bool {
	return true
}

func (ms MetadataService) ConfigRoot() string {
func (ms Service) ConfigRoot() string {
	return ms.Root
}

func (ms MetadataService) FetchUserdata() ([]byte, error) {
	return ms.FetchData(ms.UserdataUrl())
func (ms Service) FetchUserdata() ([]byte, error) {
	return ms.FetchData(ms.UserdataURL())
}

func (ms MetadataService) FetchData(url string) ([]byte, error) {
func (ms Service) FetchData(url string) ([]byte, error) {
	if data, err := ms.Client.GetRetry(url); err == nil {
		return data, err
	} else if _, ok := err.(pkg.ErrNotFound); ok {
@@ -63,10 +77,10 @@ func (ms MetadataService) FetchData(url string) ([]byte, error) {
	}
}

func (ms MetadataService) MetadataUrl() string {
func (ms Service) MetadataURL() string {
	return (ms.Root + ms.MetadataPath)
}

func (ms MetadataService) UserdataUrl() string {
func (ms Service) UserdataURL() string {
	return (ms.Root + ms.UserdataPath)
}
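A minimal sketch of how the provider datasources in this diff sit on top of the refactored Service (assuming the rancher/os packages are importable); the paths mirror the EC2 defaults exercised by the tests:

package main

import (
	"fmt"

	"github.com/rancher/os/config/cloudinit/datasource/metadata"
)

func main() {
	// Same constructor the gce and packet datasources wrap; NewDatasource
	// appends the missing trailing "/" to the root.
	svc := metadata.NewDatasource(
		"http://169.254.169.254", // root
		"2009-04-04",             // APIVersion, probed by IsAvailable
		"2009-04-04/user-data",   // UserdataPath
		"2009-04-04/meta-data/",  // MetadataPath
		nil,                      // extra headers, e.g. Metadata-Flavor for GCE
	)
	fmt.Println(svc.UserdataURL()) // http://169.254.169.254/2009-04-04/user-data
	fmt.Println(svc.MetadataURL()) // http://169.254.169.254/2009-04-04/meta-data/
}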
185
config/cloudinit/datasource/metadata/metadata_test.go
Normal file
@@ -0,0 +1,185 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metadata
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/rancher/os/config/cloudinit/datasource/metadata/test"
|
||||
"github.com/rancher/os/config/cloudinit/pkg"
|
||||
)
|
||||
|
||||
func TestAvailabilityChanges(t *testing.T) {
|
||||
want := true
|
||||
if ac := (Service{}).AvailabilityChanges(); ac != want {
|
||||
t.Fatalf("bad AvailabilityChanges: want %t, got %t", want, ac)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsAvailable(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
root string
|
||||
apiVersion string
|
||||
resources map[string]string
|
||||
expect bool
|
||||
}{
|
||||
{
|
||||
root: "/",
|
||||
apiVersion: "2009-04-04",
|
||||
resources: map[string]string{
|
||||
"/2009-04-04": "",
|
||||
},
|
||||
expect: true,
|
||||
},
|
||||
{
|
||||
root: "/",
|
||||
resources: map[string]string{},
|
||||
expect: false,
|
||||
},
|
||||
} {
|
||||
service := &Service{
|
||||
Root: tt.root,
|
||||
Client: &test.HTTPClient{Resources: tt.resources, Err: nil},
|
||||
APIVersion: tt.apiVersion,
|
||||
}
|
||||
if a := service.IsAvailable(); a != tt.expect {
|
||||
t.Fatalf("bad isAvailable (%q): want %t, got %t", tt.resources, tt.expect, a)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFetchUserdata(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
root string
|
||||
userdataPath string
|
||||
resources map[string]string
|
||||
userdata []byte
|
||||
clientErr error
|
||||
expectErr error
|
||||
}{
|
||||
{
|
||||
root: "/",
|
||||
userdataPath: "2009-04-04/user-data",
|
||||
resources: map[string]string{
|
||||
"/2009-04-04/user-data": "hello",
|
||||
},
|
||||
userdata: []byte("hello"),
|
||||
},
|
||||
{
|
||||
root: "/",
|
||||
clientErr: pkg.ErrNotFound{Err: fmt.Errorf("test not found error")},
|
||||
userdata: []byte{},
|
||||
},
|
||||
{
|
||||
root: "/",
|
||||
clientErr: pkg.ErrTimeout{Err: fmt.Errorf("test timeout error")},
|
||||
expectErr: pkg.ErrTimeout{Err: fmt.Errorf("test timeout error")},
|
||||
},
|
||||
} {
|
||||
service := &Service{
|
||||
Root: tt.root,
|
||||
Client: &test.HTTPClient{Resources: tt.resources, Err: tt.clientErr},
|
||||
UserdataPath: tt.userdataPath,
|
||||
}
|
||||
data, err := service.FetchUserdata()
|
||||
if Error(err) != Error(tt.expectErr) {
|
||||
t.Fatalf("bad error (%q): want %q, got %q", tt.resources, tt.expectErr, err)
|
||||
}
|
||||
if !bytes.Equal(data, tt.userdata) {
|
||||
t.Fatalf("bad userdata (%q): want %q, got %q", tt.resources, tt.userdata, data)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestURLs(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
root string
|
||||
userdataPath string
|
||||
metadataPath string
|
||||
expectRoot string
|
||||
userdata string
|
||||
metadata string
|
||||
}{
|
||||
{
|
||||
root: "/",
|
||||
userdataPath: "2009-04-04/user-data",
|
||||
metadataPath: "2009-04-04/meta-data",
|
||||
expectRoot: "/",
|
||||
userdata: "/2009-04-04/user-data",
|
||||
metadata: "/2009-04-04/meta-data",
|
||||
},
|
||||
{
|
||||
root: "http://169.254.169.254/",
|
||||
userdataPath: "2009-04-04/user-data",
|
||||
metadataPath: "2009-04-04/meta-data",
|
||||
expectRoot: "http://169.254.169.254/",
|
||||
userdata: "http://169.254.169.254/2009-04-04/user-data",
|
||||
metadata: "http://169.254.169.254/2009-04-04/meta-data",
|
||||
},
|
||||
} {
|
||||
service := &Service{
|
||||
Root: tt.root,
|
||||
UserdataPath: tt.userdataPath,
|
||||
MetadataPath: tt.metadataPath,
|
||||
}
|
||||
if url := service.UserdataURL(); url != tt.userdata {
|
||||
t.Fatalf("bad url (%q): want %q, got %q", tt.root, tt.userdata, url)
|
||||
}
|
||||
if url := service.MetadataURL(); url != tt.metadata {
|
||||
t.Fatalf("bad url (%q): want %q, got %q", tt.root, tt.metadata, url)
|
||||
}
|
||||
if url := service.ConfigRoot(); url != tt.expectRoot {
|
||||
t.Fatalf("bad url (%q): want %q, got %q", tt.root, tt.expectRoot, url)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewDatasource(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
root string
|
||||
expectRoot string
|
||||
}{
|
||||
{
|
||||
root: "",
|
||||
expectRoot: "/",
|
||||
},
|
||||
{
|
||||
root: "/",
|
||||
expectRoot: "/",
|
||||
},
|
||||
{
|
||||
root: "http://169.254.169.254",
|
||||
expectRoot: "http://169.254.169.254/",
|
||||
},
|
||||
{
|
||||
root: "http://169.254.169.254/",
|
||||
expectRoot: "http://169.254.169.254/",
|
||||
},
|
||||
} {
|
||||
service := NewDatasource(tt.root, "", "", "", nil)
|
||||
if service.Root != tt.expectRoot {
|
||||
t.Fatalf("bad root (%q): want %q, got %q", tt.root, tt.expectRoot, service.Root)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func Error(err error) string {
|
||||
if err != nil {
|
||||
return err.Error()
|
||||
}
|
||||
return ""
|
||||
}
|
||||
144
config/cloudinit/datasource/metadata/packet/metadata.go
Executable file
@@ -0,0 +1,144 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package packet
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/rancher/os/config/cloudinit/datasource"
|
||||
"github.com/rancher/os/config/cloudinit/datasource/metadata"
|
||||
"github.com/rancher/os/log"
|
||||
"github.com/rancher/os/netconf"
|
||||
|
||||
yaml "github.com/cloudfoundry-incubator/candiedyaml"
|
||||
packetMetadata "github.com/packethost/packngo/metadata"
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultAddress = "https://metadata.packet.net/"
|
||||
apiVersion = ""
|
||||
userdataURL = "userdata"
|
||||
metadataPath = "metadata"
|
||||
)
|
||||
|
||||
type MetadataService struct {
|
||||
metadata.Service
|
||||
}
|
||||
|
||||
func NewDatasource(root string) *MetadataService {
|
||||
if root == "" {
|
||||
root = DefaultAddress
|
||||
}
|
||||
|
||||
return &MetadataService{Service: metadata.NewDatasource(root, apiVersion, userdataURL, metadataPath, nil)}
|
||||
}
|
||||
|
||||
func (ms *MetadataService) FetchMetadata() (metadata datasource.Metadata, err error) {
|
||||
c := packetMetadata.NewClient(http.DefaultClient)
|
||||
m, err := c.Metadata.Get()
|
||||
if err != nil {
|
||||
log.Errorf("Failed to get Packet metadata: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
bondCfg := netconf.InterfaceConfig{
|
||||
Addresses: []string{},
|
||||
BondOpts: map[string]string{
|
||||
"lacp_rate": "1",
|
||||
"xmit_hash_policy": "layer3+4",
|
||||
"downdelay": "200",
|
||||
"updelay": "200",
|
||||
"miimon": "100",
|
||||
"mode": "4",
|
||||
},
|
||||
}
|
||||
netCfg := netconf.NetworkConfig{
|
||||
Interfaces: map[string]netconf.InterfaceConfig{},
|
||||
}
|
||||
for _, iface := range m.Network.Interfaces {
|
||||
netCfg.Interfaces["mac="+iface.Mac] = netconf.InterfaceConfig{
|
||||
Bond: "bond0",
|
||||
}
|
||||
}
|
||||
for _, addr := range m.Network.Addresses {
|
||||
bondCfg.Addresses = append(bondCfg.Addresses, fmt.Sprintf("%s/%d", addr.Address, addr.Cidr))
|
||||
if addr.Gateway != "" {
|
||||
if addr.AddressFamily == 4 {
|
||||
if addr.Public {
|
||||
bondCfg.Gateway = addr.Gateway
|
||||
}
|
||||
} else {
|
||||
bondCfg.GatewayIpv6 = addr.Gateway
|
||||
}
|
||||
}
|
||||
|
||||
if addr.AddressFamily == 4 && strings.HasPrefix(addr.Gateway, "10.") {
|
||||
bondCfg.PostUp = append(bondCfg.PostUp, "ip route add 10.0.0.0/8 via "+addr.Gateway)
|
||||
}
|
||||
}
|
||||
|
||||
netCfg.Interfaces["bond0"] = bondCfg
|
||||
b, _ := yaml.Marshal(netCfg)
|
||||
log.Debugf("Generated network config: %s", string(b))
|
||||
|
||||
// the old code var data []byte
|
||||
/* var m Metadata
|
||||
|
||||
if data, err = ms.FetchData(ms.MetadataURL()); err != nil || len(data) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
if err = json.Unmarshal(data, &m); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if len(m.NetworkData.Netblocks) > 0 {
|
||||
for _, Netblock := range m.NetworkData.Netblocks {
|
||||
if Netblock.AddressFamily == 4 {
|
||||
if Netblock.Public == true {
|
||||
metadata.PublicIPv4 = Netblock.Address
|
||||
} else {
|
||||
metadata.PrivateIPv4 = Netblock.Address
|
||||
}
|
||||
} else {
|
||||
metadata.PublicIPv6 = Netblock.Address
|
||||
}
|
||||
}
|
||||
}
|
||||
*/
|
||||
metadata.Hostname = m.Hostname
|
||||
metadata.SSHPublicKeys = map[string]string{}
|
||||
for i, key := range m.SshKeys {
|
||||
metadata.SSHPublicKeys[strconv.Itoa(i)] = key
|
||||
}
|
||||
|
||||
metadata.NetworkConfig = netCfg
|
||||
|
||||
// This is not really the right place - perhaps we should add a call-home function in each datasource to be called after the network is applied
|
||||
//(see the original in cmd/cloudsave/packet)
|
||||
if _, err = http.Post(m.PhoneHomeURL, "application/json", bytes.NewReader([]byte{})); err != nil {
|
||||
log.Errorf("Failed to post to Packet phone home URL: %v", err)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (ms MetadataService) Type() string {
|
||||
return "packet-metadata-service"
|
||||
}
|
||||
40
config/cloudinit/datasource/metadata/test/test.go
Normal file
@@ -0,0 +1,40 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package test

import (
	"fmt"

	"github.com/rancher/os/config/cloudinit/pkg"
)

type HTTPClient struct {
	Resources map[string]string
	Err       error
}

func (t *HTTPClient) GetRetry(url string) ([]byte, error) {
	if t.Err != nil {
		return nil, t.Err
	}
	if val, ok := t.Resources[url]; ok {
		return []byte(val), nil
	}
	return nil, pkg.ErrNotFound{fmt.Errorf("not found: %q", url)}
}

func (t *HTTPClient) Get(url string) ([]byte, error) {
	return t.GetRetry(url)
}
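test.HTTPClient is what lets every *_test.go in this diff exercise a datasource without a network: the Resources map plays the metadata server and Err simulates transport failures. A sketch of that wiring, following the same pattern as the test tables above (assumes the rancher/os packages are importable):

package metadata_test

import (
	"testing"

	"github.com/rancher/os/config/cloudinit/datasource/metadata"
	"github.com/rancher/os/config/cloudinit/datasource/metadata/test"
)

func TestFetchUserdataWithFakeClient(t *testing.T) {
	svc := metadata.Service{
		Root:         "/",
		Client:       &test.HTTPClient{Resources: map[string]string{"/user-data": "hello"}},
		UserdataPath: "user-data",
	}
	data, err := svc.FetchUserdata() // hits the fake client via UserdataURL()
	if err != nil || string(data) != "hello" {
		t.Fatalf("unexpected userdata: %q, %v", data, err)
	}
}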
@@ -12,16 +12,18 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package proc_cmdline
|
||||
package proccmdline
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"strings"
|
||||
|
||||
"github.com/coreos/coreos-cloudinit/datasource"
|
||||
"github.com/coreos/coreos-cloudinit/pkg"
|
||||
"github.com/rancher/os/log"
|
||||
|
||||
"github.com/rancher/os/config/cloudinit/datasource"
|
||||
"github.com/rancher/os/config/cloudinit/pkg"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -29,38 +31,48 @@ const (
|
||||
ProcCmdlineCloudConfigFlag = "cloud-config-url"
|
||||
)
|
||||
|
||||
type procCmdline struct {
|
||||
Location string
|
||||
type ProcCmdline struct {
|
||||
Location string
|
||||
lastError error
|
||||
}
|
||||
|
||||
func NewDatasource() *procCmdline {
|
||||
return &procCmdline{Location: ProcCmdlineLocation}
|
||||
func NewDatasource() *ProcCmdline {
|
||||
return &ProcCmdline{Location: ProcCmdlineLocation}
|
||||
}
|
||||
|
||||
func (c *procCmdline) IsAvailable() bool {
|
||||
contents, err := ioutil.ReadFile(c.Location)
|
||||
if err != nil {
|
||||
func (c *ProcCmdline) IsAvailable() bool {
|
||||
var contents []byte
|
||||
contents, c.lastError = ioutil.ReadFile(c.Location)
|
||||
if c.lastError != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
cmdline := strings.TrimSpace(string(contents))
|
||||
_, err = findCloudConfigURL(cmdline)
|
||||
return (err == nil)
|
||||
_, c.lastError = findCloudConfigURL(cmdline)
|
||||
return (c.lastError == nil)
|
||||
}
|
||||
|
||||
func (c *procCmdline) AvailabilityChanges() bool {
|
||||
func (c *ProcCmdline) Finish() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *ProcCmdline) String() string {
|
||||
return fmt.Sprintf("%s: %s (lastError: %s)", c.Type(), c.Location, c.lastError)
|
||||
}
|
||||
|
||||
func (c *ProcCmdline) AvailabilityChanges() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (c *procCmdline) ConfigRoot() string {
|
||||
func (c *ProcCmdline) ConfigRoot() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (c *procCmdline) FetchMetadata() (datasource.Metadata, error) {
|
||||
func (c *ProcCmdline) FetchMetadata() (datasource.Metadata, error) {
|
||||
return datasource.Metadata{}, nil
|
||||
}
|
||||
|
||||
func (c *procCmdline) FetchUserdata() ([]byte, error) {
|
||||
func (c *ProcCmdline) FetchUserdata() ([]byte, error) {
|
||||
contents, err := ioutil.ReadFile(c.Location)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -72,7 +84,7 @@ func (c *procCmdline) FetchUserdata() ([]byte, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
client := pkg.NewHttpClient()
|
||||
client := pkg.NewHTTPClient()
|
||||
cfg, err := client.GetRetry(url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -81,7 +93,7 @@ func (c *procCmdline) FetchUserdata() ([]byte, error) {
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
func (c *procCmdline) Type() string {
|
||||
func (c *ProcCmdline) Type() string {
|
||||
return "proc-cmdline"
|
||||
}
|
||||
|
||||
102
config/cloudinit/datasource/proccmdline/proc_cmdline_test.go
Normal file
@@ -0,0 +1,102 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package proccmdline
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestParseCmdlineCloudConfigFound(t *testing.T) {
|
||||
tests := []struct {
|
||||
input string
|
||||
expect string
|
||||
}{
|
||||
{
|
||||
"cloud-config-url=example.com",
|
||||
"example.com",
|
||||
},
|
||||
{
|
||||
"cloud_config_url=example.com",
|
||||
"example.com",
|
||||
},
|
||||
{
|
||||
"cloud-config-url cloud-config-url=example.com",
|
||||
"example.com",
|
||||
},
|
||||
{
|
||||
"cloud-config-url= cloud-config-url=example.com",
|
||||
"example.com",
|
||||
},
|
||||
{
|
||||
"cloud-config-url=one.example.com cloud-config-url=two.example.com",
|
||||
"two.example.com",
|
||||
},
|
||||
{
|
||||
"foo=bar cloud-config-url=example.com ping=pong",
|
||||
"example.com",
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
output, err := findCloudConfigURL(tt.input)
|
||||
if output != tt.expect {
|
||||
t.Errorf("Test case %d failed: %s != %s", i, output, tt.expect)
|
||||
}
|
||||
if err != nil {
|
||||
t.Errorf("Test case %d produced error: %v", i, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcCmdlineAndFetchConfig(t *testing.T) {
|
||||
|
||||
var (
|
||||
ProcCmdlineTmpl = "foo=bar cloud-config-url=%s/config\n"
|
||||
CloudConfigContent = "#cloud-config\n"
|
||||
)
|
||||
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method == "GET" && r.RequestURI == "/config" {
|
||||
fmt.Fprint(w, CloudConfigContent)
|
||||
}
|
||||
}))
|
||||
defer ts.Close()
|
||||
|
||||
file, err := ioutil.TempFile(os.TempDir(), "test_proc_cmdline")
|
||||
defer os.Remove(file.Name())
|
||||
if err != nil {
|
||||
t.Errorf("Test produced error: %v", err)
|
||||
}
|
||||
_, err = file.Write([]byte(fmt.Sprintf(ProcCmdlineTmpl, ts.URL)))
|
||||
if err != nil {
|
||||
t.Errorf("Test produced error: %v", err)
|
||||
}
|
||||
|
||||
p := NewDatasource()
|
||||
p.Location = file.Name()
|
||||
cfg, err := p.FetchUserdata()
|
||||
if err != nil {
|
||||
t.Errorf("Test produced error: %v", err)
|
||||
}
|
||||
|
||||
if string(cfg) != CloudConfigContent {
|
||||
t.Errorf("Test failed, response body: %s != %s", cfg, CloudConfigContent)
|
||||
}
|
||||
}
|
||||
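findCloudConfigURL itself is not shown in this hunk; the following is a hypothetical re-implementation written only to make the parsing rules in the table above concrete: both the dash and underscore spellings are accepted, bare flags and empty values are ignored, and the last occurrence wins.

package main

import (
	"errors"
	"fmt"
	"strings"
)

// findCloudConfigURL scans a kernel command line for a cloud-config URL.
// Hypothetical sketch consistent with TestParseCmdlineCloudConfigFound above.
func findCloudConfigURL(cmdline string) (string, error) {
	url := ""
	for _, token := range strings.Fields(cmdline) {
		parts := strings.SplitN(token, "=", 2)
		if len(parts) != 2 || parts[1] == "" {
			continue // bare flags and empty values are ignored
		}
		if parts[0] == "cloud-config-url" || parts[0] == "cloud_config_url" {
			url = parts[1] // later occurrences override earlier ones
		}
	}
	if url == "" {
		return "", errors.New("cloud-config-url not found")
	}
	return url, nil
}

func main() {
	fmt.Println(findCloudConfigURL("foo=bar cloud-config-url=example.com ping=pong"))
}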
57
config/cloudinit/datasource/test/filesystem.go
Executable file
@@ -0,0 +1,57 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package test

import (
	"fmt"
	"os"
	"path"
)

type MockFilesystem map[string]File

type File struct {
	Path      string
	Contents  string
	Directory bool
}

func (m MockFilesystem) ReadFile(filename string) ([]byte, error) {
	if f, ok := m[path.Clean(filename)]; ok {
		if f.Directory {
			return nil, fmt.Errorf("read %s: is a directory", filename)
		}
		return []byte(f.Contents), nil
	}
	return nil, os.ErrNotExist
}

func NewMockFilesystem(files ...File) MockFilesystem {
	fs := MockFilesystem{}
	for _, file := range files {
		fs[file.Path] = file

		// Create the directories leading up to the file
		p := path.Dir(file.Path)
		for p != "/" && p != "." {
			if f, ok := fs[p]; ok && !f.Directory {
				panic(fmt.Sprintf("%q already exists and is not a directory (%#v)", p, f))
			}
			fs[p] = File{Path: p, Directory: true}
			p = path.Dir(p)
		}
	}
	return fs
}
115
config/cloudinit/datasource/test/filesystem_test.go
Normal file
@@ -0,0 +1,115 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package test
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestReadFile(t *testing.T) {
|
||||
tests := []struct {
|
||||
filesystem MockFilesystem
|
||||
|
||||
filename string
|
||||
contents string
|
||||
err error
|
||||
}{
|
||||
{
|
||||
filename: "dne",
|
||||
err: os.ErrNotExist,
|
||||
},
|
||||
{
|
||||
filesystem: MockFilesystem{
|
||||
"exists": File{Contents: "hi"},
|
||||
},
|
||||
filename: "exists",
|
||||
contents: "hi",
|
||||
},
|
||||
{
|
||||
filesystem: MockFilesystem{
|
||||
"dir": File{Directory: true},
|
||||
},
|
||||
filename: "dir",
|
||||
err: errors.New("read dir: is a directory"),
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
contents, err := tt.filesystem.ReadFile(tt.filename)
|
||||
if tt.contents != string(contents) {
|
||||
t.Errorf("bad contents (test %d): want %q, got %q", i, tt.contents, string(contents))
|
||||
}
|
||||
if !reflect.DeepEqual(tt.err, err) {
|
||||
t.Errorf("bad error (test %d): want %v, got %v", i, tt.err, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewMockFilesystem(t *testing.T) {
|
||||
tests := []struct {
|
||||
files []File
|
||||
|
||||
filesystem MockFilesystem
|
||||
}{
|
||||
{
|
||||
filesystem: MockFilesystem{},
|
||||
},
|
||||
{
|
||||
files: []File{{Path: "file"}},
|
||||
filesystem: MockFilesystem{
|
||||
"file": File{Path: "file"},
|
||||
},
|
||||
},
|
||||
{
|
||||
files: []File{{Path: "/file"}},
|
||||
filesystem: MockFilesystem{
|
||||
"/file": File{Path: "/file"},
|
||||
},
|
||||
},
|
||||
{
|
||||
files: []File{{Path: "/dir/file"}},
|
||||
filesystem: MockFilesystem{
|
||||
"/dir": File{Path: "/dir", Directory: true},
|
||||
"/dir/file": File{Path: "/dir/file"},
|
||||
},
|
||||
},
|
||||
{
|
||||
files: []File{{Path: "/dir/dir/file"}},
|
||||
filesystem: MockFilesystem{
|
||||
"/dir": File{Path: "/dir", Directory: true},
|
||||
"/dir/dir": File{Path: "/dir/dir", Directory: true},
|
||||
"/dir/dir/file": File{Path: "/dir/dir/file"},
|
||||
},
|
||||
},
|
||||
{
|
||||
files: []File{{Path: "/dir/dir/dir", Directory: true}},
|
||||
filesystem: MockFilesystem{
|
||||
"/dir": File{Path: "/dir", Directory: true},
|
||||
"/dir/dir": File{Path: "/dir/dir", Directory: true},
|
||||
"/dir/dir/dir": File{Path: "/dir/dir/dir", Directory: true},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
filesystem := NewMockFilesystem(tt.files...)
|
||||
if !reflect.DeepEqual(tt.filesystem, filesystem) {
|
||||
t.Errorf("bad filesystem (test %d): want %#v, got %#v", i, tt.filesystem, filesystem)
|
||||
}
|
||||
}
|
||||
}
|
||||
76
config/cloudinit/datasource/url/url.go
Executable file
@@ -0,0 +1,76 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package url

import (
	"fmt"

	"github.com/rancher/os/config/cloudinit/datasource"
	"github.com/rancher/os/config/cloudinit/pkg"
)

type RemoteFile struct {
	url       string
	lastError error
}

func NewDatasource(url string) *RemoteFile {
	return &RemoteFile{url, nil}
}

func (f *RemoteFile) IsAvailable() bool {
	client := pkg.NewHTTPClient()
	_, f.lastError = client.Get(f.url)
	return (f.lastError == nil)
}

func (f *RemoteFile) Finish() error {
	return nil
}

func (f *RemoteFile) String() string {
	return fmt.Sprintf("%s: %s (lastError: %s)", f.Type(), f.url, f.lastError)
}

func (f *RemoteFile) AvailabilityChanges() bool {
	return false
	// TODO: we should trigger something to change the network state
	/* if f.lastError != nil {
		// if we have a Network error, then we should retry.
		// otherwise, we've made a request to the server, and its said nope.
		if _, ok := f.lastError.(pkg.ErrNetwork); !ok {
			return false
		}
	}
	return true
	*/
}

func (f *RemoteFile) ConfigRoot() string {
	return ""
}

func (f *RemoteFile) FetchMetadata() (datasource.Metadata, error) {
	return datasource.Metadata{}, nil
}

func (f *RemoteFile) FetchUserdata() ([]byte, error) {
	client := pkg.NewHTTPClient()
	return client.GetRetry(f.url)
}

func (f *RemoteFile) Type() string {
	return "url"
}
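A short usage sketch for the url datasource above (the URL is a made-up example and the rancher/os packages are assumed importable): IsAvailable records lastError, which String() then surfaces for debugging.

package main

import (
	"fmt"

	"github.com/rancher/os/config/cloudinit/datasource/url"
)

func main() {
	ds := url.NewDatasource("http://example.com/cloud-config.yml") // hypothetical URL
	if ds.IsAvailable() {
		userdata, err := ds.FetchUserdata()
		fmt.Println(len(userdata), err)
	} else {
		fmt.Println(ds) // String() reports the lastError captured by IsAvailable
	}
}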
209
config/cloudinit/datasource/vmware/vmware.go
Executable file
@@ -0,0 +1,209 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package vmware
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
|
||||
"github.com/rancher/os/config/cloudinit/config"
|
||||
"github.com/rancher/os/config/cloudinit/datasource"
|
||||
"github.com/rancher/os/log"
|
||||
"github.com/rancher/os/netconf"
|
||||
)
|
||||
|
||||
type readConfigFunction func(key string) (string, error)
|
||||
type urlDownloadFunction func(url string) ([]byte, error)
|
||||
|
||||
type VMWare struct {
|
||||
ovfFileName string
|
||||
readConfig readConfigFunction
|
||||
urlDownload urlDownloadFunction
|
||||
lastError error
|
||||
}
|
||||
|
||||
func (v VMWare) Finish() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (v VMWare) String() string {
|
||||
return fmt.Sprintf("%s: %s (lastError: %s)", v.Type(), v.ovfFileName, v.lastError)
|
||||
}
|
||||
|
||||
func (v VMWare) AvailabilityChanges() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (v VMWare) ConfigRoot() string {
|
||||
return "/"
|
||||
}
|
||||
|
||||
func (v VMWare) read(keytmpl string, args ...interface{}) (string, error) {
|
||||
key := fmt.Sprintf(keytmpl, args...)
|
||||
return v.readConfig(key)
|
||||
}
|
||||
|
||||
func (v VMWare) FetchMetadata() (metadata datasource.Metadata, err error) {
|
||||
metadata.NetworkConfig = netconf.NetworkConfig{}
|
||||
metadata.Hostname, _ = v.readConfig("hostname")
|
||||
|
||||
//netconf := map[string]string{}
|
||||
//saveConfig := func(key string, args ...interface{}) string {
|
||||
// key = fmt.Sprintf(key, args...)
|
||||
// val, _ := v.readConfig(key)
|
||||
// if val != "" {
|
||||
// netconf[key] = val
|
||||
// }
|
||||
// return val
|
||||
//}
|
||||
|
||||
for i := 0; ; i++ {
|
||||
val, _ := v.read("dns.server.%d", i)
|
||||
if val == "" {
|
||||
break
|
||||
}
|
||||
metadata.NetworkConfig.DNS.Nameservers = append(metadata.NetworkConfig.DNS.Nameservers, val)
|
||||
}
|
||||
|
||||
for i := 0; ; i++ {
|
||||
//if domain := saveConfig("dns.domain.%d", i); domain == "" {
|
||||
val, _ := v.read("dns.domain.%d", i)
|
||||
if val == "" {
|
||||
break
|
||||
}
|
||||
metadata.NetworkConfig.DNS.Search = append(metadata.NetworkConfig.DNS.Search, val)
|
||||
}
|
||||
|
||||
metadata.NetworkConfig.Interfaces = make(map[string]netconf.InterfaceConfig)
|
||||
found := true
|
||||
for i := 0; found; i++ {
|
||||
found = false
|
||||
|
||||
ethName := fmt.Sprintf("eth%d", i)
|
||||
netDevice := netconf.InterfaceConfig{
|
||||
DHCP: true,
|
||||
Match: ethName,
|
||||
Addresses: []string{},
|
||||
}
|
||||
//found = (saveConfig("interface.%d.name", i) != "") || found
|
||||
if val, _ := v.read("interface.%d.name", i); val != "" {
|
||||
netDevice.Match = val
|
||||
found = true
|
||||
}
|
||||
//found = (saveConfig("interface.%d.mac", i) != "") || found
|
||||
if val, _ := v.read("interface.%d.mac", i); val != "" {
|
||||
netDevice.Match = "mac:" + val
|
||||
found = true
|
||||
}
|
||||
//found = (saveConfig("interface.%d.dhcp", i) != "") || found
|
||||
if val, _ := v.read("interface.%d.dhcp", i); val != "" {
|
||||
netDevice.DHCP = (strings.ToLower(val) != "no")
|
||||
found = true
|
||||
}
|
||||
|
||||
role, _ := v.read("interface.%d.role", i)
|
||||
for a := 0; ; a++ {
|
||||
address, _ := v.read("interface.%d.ip.%d.address", i, a)
|
||||
if address == "" {
|
||||
break
|
||||
}
|
||||
netDevice.Addresses = append(netDevice.Addresses, address)
|
||||
found = true
|
||||
netDevice.DHCP = false
|
||||
|
||||
ip, _, err := net.ParseCIDR(address)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
//return metadata, err
|
||||
}
|
||||
|
||||
switch role {
|
||||
case "public":
|
||||
if ip.To4() != nil {
|
||||
metadata.PublicIPv4 = ip
|
||||
} else {
|
||||
metadata.PublicIPv6 = ip
|
||||
}
|
||||
case "private":
|
||||
if ip.To4() != nil {
|
||||
metadata.PrivateIPv4 = ip
|
||||
} else {
|
||||
metadata.PrivateIPv6 = ip
|
||||
}
|
||||
case "":
|
||||
default:
|
||||
//return metadata, fmt.Errorf("unrecognized role: %q", role)
|
||||
log.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
for r := 0; ; r++ {
|
||||
gateway, _ := v.read("interface.%d.route.%d.gateway", i, r)
|
||||
// TODO: do we really not do anything but default routing?
|
||||
//destination, _ := v.read("interface.%d.route.%d.destination", i, r)
|
||||
destination := ""
|
||||
|
||||
if gateway == "" && destination == "" {
|
||||
break
|
||||
} else {
|
||||
netDevice.Gateway = gateway
|
||||
found = true
|
||||
}
|
||||
}
|
||||
if found {
|
||||
metadata.NetworkConfig.Interfaces[ethName] = netDevice
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (v VMWare) FetchUserdata() ([]byte, error) {
|
||||
encoding, err := v.readConfig("cloud-init.data.encoding")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
data, err := v.readConfig("cloud-init.config.data")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Try to fallback to url if no explicit data
|
||||
if data == "" {
|
||||
url, err := v.readConfig("cloud-init.config.url")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if url != "" {
|
||||
rawData, err := v.urlDownload(url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
data = string(rawData)
|
||||
}
|
||||
}
|
||||
|
||||
if encoding != "" {
|
||||
return config.DecodeContent(data, encoding)
|
||||
}
|
||||
return []byte(data), nil
|
||||
}
|
||||
|
||||
func (v VMWare) Type() string {
|
||||
return "VMWare"
|
||||
}
|
||||
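The readConfig and urlDownload function fields above exist so the guestinfo backdoor can be swapped out; the following is a hypothetical in-package test sketch (fake key names and values) that drives FetchMetadata from a plain map:

package vmware

import "testing"

func TestFetchMetadataFromFakeGuestinfo(t *testing.T) {
	guestinfo := map[string]string{ // hypothetical guestinfo values
		"hostname":                 "ros-test",
		"interface.0.name":         "eth0",
		"interface.0.ip.0.address": "10.0.0.2/24",
		"interface.0.role":         "private",
	}
	v := VMWare{
		// A map lookup stands in for the VMware backdoor / OVF readers.
		readConfig:  func(key string) (string, error) { return guestinfo[key], nil },
		urlDownload: func(url string) ([]byte, error) { return nil, nil },
	}
	md, err := v.FetchMetadata()
	if err != nil || md.Hostname != "ros-test" {
		t.Fatalf("unexpected metadata: %#v (%v)", md, err)
	}
	if md.PrivateIPv4.String() != "10.0.0.2" {
		t.Fatalf("unexpected private IPv4: %v", md.PrivateIPv4)
	}
}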
109
config/cloudinit/datasource/vmware/vmware_amd64.go
Executable file
@@ -0,0 +1,109 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package vmware
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"github.com/rancher/os/log"
|
||||
"github.com/rancher/os/util"
|
||||
|
||||
"github.com/rancher/os/config/cloudinit/pkg"
|
||||
|
||||
"github.com/sigma/vmw-guestinfo/rpcvmx"
|
||||
"github.com/sigma/vmw-guestinfo/vmcheck"
|
||||
ovf "github.com/sigma/vmw-ovflib"
|
||||
)
|
||||
|
||||
type ovfWrapper struct {
|
||||
env *ovf.OvfEnvironment
|
||||
}
|
||||
|
||||
func (ovf ovfWrapper) readConfig(key string) (string, error) {
|
||||
return ovf.env.Properties["guestinfo."+key], nil
|
||||
}
|
||||
|
||||
func NewDatasource(fileName string) *VMWare {
|
||||
if util.GetHypervisor() != "vmware" {
|
||||
return nil
|
||||
}
|
||||
// read from provided ovf environment document (typically /media/ovfenv/ovf-env.xml)
|
||||
if fileName != "" {
|
||||
log.Printf("Using OVF environment from %s\n", fileName)
|
||||
ovfEnv, err := ioutil.ReadFile(fileName)
|
||||
if err != nil {
|
||||
ovfEnv = make([]byte, 0)
|
||||
}
|
||||
return &VMWare{
|
||||
ovfFileName: fileName,
|
||||
readConfig: getOvfReadConfig(ovfEnv),
|
||||
urlDownload: urlDownload,
|
||||
}
|
||||
}
|
||||
|
||||
// try to read ovf environment from VMware tools
|
||||
data, err := readConfig("ovfenv")
|
||||
if err == nil && data != "" {
|
||||
log.Printf("Using OVF environment from guestinfo\n")
|
||||
return &VMWare{
|
||||
readConfig: getOvfReadConfig([]byte(data)),
|
||||
urlDownload: urlDownload,
|
||||
}
|
||||
}
|
||||
|
||||
// if everything fails, fallback to directly reading variables from the backdoor
|
||||
log.Printf("Using guestinfo variables\n")
|
||||
return &VMWare{
|
||||
readConfig: readConfig,
|
||||
urlDownload: urlDownload,
|
||||
}
|
||||
}
|
||||
|
||||
func (v VMWare) IsAvailable() bool {
|
||||
if util.GetHypervisor() != "vmware" {
|
||||
return false
|
||||
}
|
||||
if v.ovfFileName != "" {
|
||||
_, v.lastError = os.Stat(v.ovfFileName)
|
||||
return !os.IsNotExist(v.lastError)
|
||||
}
|
||||
return vmcheck.IsVirtualWorld()
|
||||
}
|
||||
|
||||
func readConfig(key string) (string, error) {
|
||||
data, err := rpcvmx.NewConfig().String(key, "")
|
||||
if err == nil {
|
||||
log.Printf("Read from %q: %q\n", key, data)
|
||||
} else {
|
||||
log.Printf("Failed to read from %q: %v\n", key, err)
|
||||
}
|
||||
return data, err
|
||||
}
|
||||
|
||||
func getOvfReadConfig(ovfEnv []byte) readConfigFunction {
|
||||
env := &ovf.OvfEnvironment{}
|
||||
if len(ovfEnv) != 0 {
|
||||
env = ovf.ReadEnvironment(ovfEnv)
|
||||
}
|
||||
|
||||
wrapper := ovfWrapper{env}
|
||||
return wrapper.readConfig
|
||||
}
|
||||
|
||||
func urlDownload(url string) ([]byte, error) {
|
||||
client := pkg.NewHTTPClient()
|
||||
return client.GetRetry(url)
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff.