kafka: upgrade downloaded binaries and references in kafka.service (
Bug 1434339) r?gps
Each broker will need to be restarted after deploying this commit.
MozReview-Commit-ID: INI28zXEn9q
--- a/ansible/roles/docker-kafkabroker/files/start-kafka
+++ b/ansible/roles/docker-kafkabroker/files/start-kafka
@@ -102,35 +102,15 @@ for hostname in hostnames:
raise Exception('expected followers count wrong')
except Exception:
if time.time() - start > timeout:
raise Exception('timeout')
time.sleep(0.1)
-command = [
- '/usr/bin/java',
- '-Xmx1G',
- '-Xms1G',
- '-server',
- '-XX:+UseParNewGC',
- '-XX:+UseConcMarkSweepGC',
- '-XX:+CMSClassUnloadingEnabled',
- '-XX:+CMSScavengeBeforeRemark',
- '-XX:+DisableExplicitGC',
- '-Djava.awt.headless=true',
- '-Xloggc:/var/log/kafka/server-gc.log',
- '-verbose:gc',
- '-XX:+PrintGCDetails',
- '-XX:+PrintGCDateStamps',
- '-XX:+PrintGCTimeStamps',
- '-Dcom.sun.management.jmxremote',
- '-Dcom.sun.management.jmxremote.authenticate=false',
- '-Dcom.sun.management.jmxremote.ssl=false',
- '-Dlog4j.configuration=file:/etc/kafka/log4j.properties',
- '-cp',
- ':/opt/kafka/libs/jopt-simple-3.2.jar:/opt/kafka/libs/kafka_2.10-0.8.2.2.jar:/opt/kafka/libs/kafka_2.10-0.8.2.2-javadoc.jar:/opt/kafka/libs/kafka_2.10-0.8.2.2-scaladoc.jar:/opt/kafka/libs/kafka_2.10-0.8.2.2-sources.jar:/opt/kafka/libs/kafka_2.10-0.8.2.2-test.jar:/opt/kafka/libs/kafka-clients-0.8.2.2.jar:/opt/kafka/libs/log4j-1.2.16.jar:/opt/kafka/libs/lz4-1.2.0.jar:/opt/kafka/libs/metrics-core-2.2.0.jar:/opt/kafka/libs/scala-library-2.10.4.jar:/opt/kafka/libs/slf4j-api-1.7.6.jar:/opt/kafka/libs/slf4j-log4j12-1.6.1.jar:/opt/kafka/libs/snappy-java-1.1.1.6.jar:/opt/kafka/libs/zkclient-0.3.jar:/opt/kafka/libs/zookeeper-3.4.6.jar:/opt/kafka/core/build/libs/kafka_2.10*.jar',
- 'kafka.Kafka',
- '/etc/kafka/server.properties',
-]
+env = dict(os.environ)
+env.update({
+ 'KAFKA_GC_LOG_OPTS': '-Xloggc:/var/log/kafka/server-gc.log -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=100M',
+ 'KAFKA_LOG4J_OPTS': '-Dlog4j.configuration=file:/etc/kafka/log4j.properties',
+})
-os.execl(command[0], *command)
\ No newline at end of file
+os.execle('/opt/kafka/bin/kafka-server-start.sh', 'kafka-server-start.sh', '/etc/kafka/server.properties', env)
--- a/ansible/roles/kafka-broker/files/kafka.service
+++ b/ansible/roles/kafka-broker/files/kafka.service
@@ -1,16 +1,21 @@
[Unit]
Description=Kafka distributed log server
-After=network.target remote-fs.target nss-lookup.target
+After=network.target remote-fs.target nss-lookup.target zookeeper.service
[Service]
WorkingDirectory=/
User=zookeeper
Group=zookeeper
PrivateTmp=true
-ExecStart=/usr/bin/java -Xmx1G -Xms1G -server -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSClassUnloadingEnabled -XX:+CMSScavengeBeforeRemark -XX:+DisableExplicitGC -Djava.awt.headless=true -Xloggc:/var/log/kafka/server-gc.log -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dlog4j.configuration=file:/etc/kafka/log4j.properties -cp :/opt/kafka/libs/jopt-simple-3.2.jar:/opt/kafka/libs/kafka_2.10-0.8.2.2.jar:/opt/kafka/libs/kafka_2.10-0.8.2.2-javadoc.jar:/opt/kafka/libs/kafka_2.10-0.8.2.2-scaladoc.jar:/opt/kafka/libs/kafka_2.10-0.8.2.2-sources.jar:/opt/kafka/libs/kafka_2.10-0.8.2.2-test.jar:/opt/kafka/libs/kafka-clients-0.8.2.2.jar:/opt/kafka/libs/log4j-1.2.16.jar:/opt/kafka/libs/lz4-1.2.0.jar:/opt/kafka/libs/metrics-core-2.2.0.jar:/opt/kafka/libs/scala-library-2.10.4.jar:/opt/kafka/libs/slf4j-api-1.7.6.jar:/opt/kafka/libs/slf4j-log4j12-1.6.1.jar:/opt/kafka/libs/snappy-java-1.1.1.6.jar:/opt/kafka/libs/zkclient-0.3.jar:/opt/kafka/libs/zookeeper-3.4.6.jar:/opt/kafka/core/build/libs/kafka_2.10*.jar kafka.Kafka /etc/kafka/server.properties
+# Route GC and log4j logging to /var/log/kafka via Kafka's env-var hooks
+Environment="KAFKA_GC_LOG_OPTS=-Xloggc:/var/log/kafka/server-gc.log -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=100M"
+Environment="KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:/etc/kafka/log4j.properties"
+
+ExecStart=/opt/kafka/bin/kafka-server-start.sh /etc/kafka/server.properties
+ExecStop=/opt/kafka/bin/kafka-server-stop.sh
Restart=always
TimeoutStopSec=60
[Install]
WantedBy=multi-user.target
--- a/ansible/roles/kafka-broker/tasks/main.yml
+++ b/ansible/roles/kafka-broker/tasks/main.yml
@@ -10,28 +10,28 @@
- tar
- name: download ZooKeeper and Kafka
get_url: url=https://s3-us-west-2.amazonaws.com/moz-packages/{{ item.path }}
dest=/var/tmp/{{ item.path }}
sha256sum={{ item.sha256 }}
with_items:
- { path: zookeeper-3.4.11.tar.gz, sha256: f6bd68a1c8f7c13ea4c2c99f13082d0d71ac464ffaf3bf7a365879ab6ad10e84 }
- - { path: kafka_2.10-0.8.2.2.tgz, sha256: 3ba1967ee88c7f364964c8a8fdf6f5075dcf7572f8c9eb74f0285b308363ecab }
+ - { path: kafka_2.11-1.0.0.tgz, sha256: b5b535f8db770cda8513e391917d0f5a35ef24c537ef3d29dcd9aa287da529f5 }
- name: uncompress ZooKeeper and Kafka
unarchive: src=/var/tmp/{{ item.src }}
dest=/opt
copy=no
creates={{ item.creates }}
owner=root
group=root
with_items:
- { src: zookeeper-3.4.11.tar.gz, creates: zookeeper-3.4.11 }
- - { src: kafka_2.10-0.8.2.2.tgz, creates: kafka_2.10-0.8.2.2 }
+ - { src: kafka_2.11-1.0.0.tgz, creates: kafka_2.11-1.0.0 }
- user: name=zookeeper
uid=2321
shell=/bin/bash
createhome=no
home=/var/run/zookeeper
when: "{{ inventory_hostname not in ignore_zookeeper_user | default([]) }}"
@@ -53,17 +53,17 @@
mode=0755
with_items:
- /var/lib/kafka
- /var/lib/zookeeper
- /var/log/kafka
- /var/log/zookeeper
- name: create Kafka symlink
- file: src=/opt/kafka_2.10-0.8.2.2
+ file: src=/opt/kafka_2.11-1.0.0
path=/opt/kafka
state=link
- name: create ZooKeeper symlink
file: src=/opt/zookeeper-3.4.11
path=/opt/zookeeper
state=link
--- a/pylib/vcsreplicator/tests/test-cluster-unavailable.t
+++ b/pylib/vcsreplicator/tests/test-cluster-unavailable.t
@@ -76,16 +76,17 @@ Disabling 2 Kafka nodes should result in
[255]
Adding node back in should result in being able to push again
$ hgmo exec hgweb0 /usr/bin/supervisorctl start kafka
kafka: started
$ hgmo exec hgweb1 /usr/bin/supervisorctl start kafka
kafka: started
+ $ sleep 3
$ hgmo exec hgssh /var/hg/venv_pash/bin/hg sendheartbeat
sending heartbeat to partition 0
sending heartbeat to partition 1
sending heartbeat to partition 2
sending heartbeat to partition 3
sending heartbeat to partition 4
sending heartbeat to partition 5
sending heartbeat to partition 6