
memo1909

Posted at 2019-03-17

prepare

Login account
module directory
S3 key
XML page

confirm

cat /etc/redhat-release    # OS version
grep processor /proc/cpuinfo | wc -l    # CPU core count
free -h    # memory
df    # disk usage
ifconfig    # network interfaces
hostname
(cat /etc/hosts)    # host entries
ping    # reachability between servers
sestatus    # SELinux status
(sudo sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config)    # disable SELinux; takes effect after reboot (sudo setenforce 0 applies immediately)
java -version
alternatives --display java
alternatives --config java
alternatives --config javac
timedatectl    # timezone
(timedatectl set-timezone Asia/Tokyo)    # set if needed
yum list installed | grep bind-utils
(yum -y install bind-utils)    # install if missing
yum list installed | grep unzip
yum -y install unzip

cat /etc/passwd    # user accounts
sudo cat /etc/sudoers    # sudo configuration
grep wheel /etc/group    # wheel group members
cat /etc/security/limits.conf    # resource limits; add the entries below

* soft nofile 94000
* hard nofile 94000
* soft nproc 64000
* hard nproc 64000
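
A quick check that the new limits apply (limits.conf is read at login, so re-login first):

ulimit -n    # max open files, expect 94000
ulimit -u    # max user processes, expect 64000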

ssh

sudo su
ssh-keygen -t rsa    # run on each server
cat /root/.ssh/id_rsa.pub
(on the destination server)
vi /root/.ssh/authorized_keys
→ append each server's id_rsa.pub to authorized_keys
chmod 600 /root/.ssh/authorized_keys
ssh USER@IP
ls -l /home/centos
(sudo chmod 700 /home/centos)    # sshd rejects keys when the home directory is group/world writable
cat id_rsa.pub
cat id_rsa.pub >> authorized_keys
sudo grep Pubkey /etc/ssh/sshd_config    # PubkeyAuthentication should be yes
sudo grep KeysFile /etc/ssh/sshd_config    # check the AuthorizedKeysFile path
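
To confirm passwordless root ssh works between nodes (a quick check; the hostnames are the ones used in the Mongo and Spark sections below):

for h in ip-192-168-1-59.ap-northeast-1.compute.internal ip-192-168-1-37.ap-northeast-1.compute.internal; do
  ssh root@"$h" hostname    # should print the remote hostname without a password prompt
done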

file share

scp -r /home/centos/ root@<HOST>:/home/centos/    # repeat for each destination server

jdk*3

mkdir /usr/java/
tar zxvf jdk-8u191-linux-x64.tar.gz -C /usr/java/
alternatives --install /usr/bin/java java /usr/java/jdk1.8.0_191/bin/java 20000
alternatives --install /usr/bin/javac javac /usr/java/jdk1.8.0_191/bin/javac 20000
alternatives --config java
alternatives --config javac
ln -s /usr/java/jdk1.8.0_191/bin/jstack /usr/bin/jstack
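
A quick sanity check that the new JDK is active (expected version 1.8.0_191):

java -version
readlink -f "$(which java)"    # should resolve to /usr/java/jdk1.8.0_191/bin/java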

mongo*3

yum -y localinstall --disablerepo=* mongo*
mkdir -p /usr/local/paxata/mongo
chown mongod:mongod /usr/local/paxata/mongo
cp /etc/mongod.conf /etc/mongod.conf_bk
vi /etc/mongod.conf
-change-
storage:
  dbPath: /usr/local/paxata/mongo
# network interfaces
net:
  port: 27017
  bindIp: 0.0.0.0  # listen on all interfaces (the stock file listens on the local interface only)
replication:
  replSetName: "paxatadb"
-/change-
service mongod start
mongo
rs.initiate()
rs.add("ip-192-168-1-59.ap-northeast-1.compute.internal:27017")
rs.add("ip-192-168-1-37.ap-northeast-1.compute.internal:27017")
rs.status()
tail -f /var/log/mongodb/mongod.log
chkconfig mongod on    # enable at boot, on all 3 servers
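
To check replica set health from any node (a minimal check; the member names should match the hosts added with rs.add() above):

mongo --eval 'rs.status().members.forEach(function(m){ print(m.name + " " + m.stateStr) })'    # expect one PRIMARY and two SECONDARY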

(uninstall / cleanup, if needed)
yum erase $(rpm -qa | grep mongodb-org)
rm -r /var/log/mongodb
rm -r /var/lib/mongo

Spark*3

tar -zxvf spark-2.3.0-bin-hadoop2.6.tgz -C /usr/local/paxata/
ln -s /usr/local/paxata/spark-2.3.0-bin-hadoop2.6 /usr/local/paxata/spark
cp /usr/local/paxata/spark/conf/spark-env.sh.template /usr/local/paxata/spark/conf/spark-env.sh
vi /usr/local/paxata/spark/conf/spark-env.sh
-change-
SPARK_MASTER_IP="ip-192-168-1-26.ap-northeast-1.compute.internal"
SPARK_WORKER_CORES=8
SPARK_WORKER_MEMORY=32g
SPARK_WORKER_INSTANCES=1
SPARK_LOCAL_DIRS=/usr/local/paxata/spark/tmp
SPARK_LOG_DIR=/usr/local/paxata/spark/logs
SPARK_PID_DIR=/usr/local/paxata/spark/pid
SPARK_WORKER_DIR=/usr/local/paxata/spark/work
SPARK_MASTER_PORT=7077
SPARK_WORKER_PORT=7078
-/change-
cp /usr/local/paxata/spark/conf/slaves.template /usr/local/paxata/spark/conf/slaves
vi /usr/local/paxata/spark/conf/slaves
-change-
ip-192-168-1-47.ap-northeast-1.compute.internal
ip-192-168-1-5.ap-northeast-1.compute.internal
-/change-

useradd paxata
mkdir /usr/local/paxata/spark/tmp
mkdir /usr/local/paxata/spark/logs
chown -R paxata:paxata /usr/local/paxata/spark-2.3.0-bin-hadoop2.6
/usr/local/paxata/spark/sbin/start-all.sh
-check the Spark master UI at http://ip-192-168-1-26.ap-northeast-1.compute.internal:8080-
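
The worker count can also be checked without a browser (a sketch; it greps the master UI page, whose HTML includes an "Alive Workers" line):

curl -s http://ip-192-168-1-26.ap-northeast-1.compute.internal:8080 | grep 'Alive Workers'    # the matched line should show 2 registered workers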

master

yum -y localinstall --disablerepo=* paxata-pipeline-db2.3.0
cp /usr/local/paxata/pipeline/config/spark.properties /usr/local/paxata/pipeline/config/spark.properties_bk
vi /usr/local/paxata/pipeline/config/spark.properties
-change-
-##Valid config options are spark://master:port or yarn-client
master.url=spark://ip-192-168-1-26.ap-northeast-1.compute.internal:7077
spark.home=/usr/local/paxata/spark
-##The config variables below are only required for Spark on YARN
-# hadoop.conf=/etc/hadoop/conf
-# yarn.num.executors=5
-# yarn.executor.cores=1
-# spark.yarn.jar=hdfs://paxcdh54yik/user/spark/share/lib/spark-assembly-1.3.0-cdh5.4.8-hadoop2.6.0-cdh5.4.8.jar
-# spark.yarn.queue=spark
-/change-
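
Note: master.url must match the SPARK_MASTER_IP and SPARK_MASTER_PORT set in spark-env.sh above (spark://ip-192-168-1-26.ap-northeast-1.compute.internal:7077), or the pipeline cannot reach the Spark master.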

cp /usr/local/paxata/pipeline/config/paxata.properties /usr/local/paxata/pipeline/config/paxata.properties_bk
vi /usr/local/paxata/pipeline/config/paxata.properties
-change-
px.rootdir=/usr/local/paxata/pipeline/cache
px.total.cache.capacity=60000
-# The following are used by the pipeline startup script
px.executor.memory=30G
px.partition.maxBytes=100000000
px.ulimit.min=1024
-## Garbage Collection / JVM Options
px.xx.-OmitStackTraceInFastThrow
px.xx.InitialHeapSize=1g
px.xx.MaxHeapSize=4g
px.xx.MaxMetaspaceSize=512m
px.xx.MetaspaceSize=256m
-/change-
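
The px.xx.* keys appear to be JVM -XX options passed through by the pipeline startup script (an assumption from the naming, not confirmed by this memo), e.g.:

px.xx.MaxHeapSize=4g → -XX:MaxHeapSize=4g
px.xx.-OmitStackTraceInFastThrow → -XX:-OmitStackTraceInFastThrow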

cp /usr/local/paxata/pipeline/config/clustering-algorithms.properties /usr/local/paxata/pipeline/config/clustering-algorithms.properties_bk
vi /usr/local/paxata/pipeline/config/clustering-algorithms.properties
-change-
cluster.metaphonejp=com.zalesia.paxata.localization.CustomKormizeJPClusterAlgorithm
-/change-

su paxata
service paxata-pipeline start
-url:8080-

Core

yum -y localinstall --disablerepo=* paxata-server-2018.2.
cp /usr/local/paxata/server/config/px.properties /usr/local/paxata/server/config/px.properties_bk
vi /usr/local/paxata/server/config/px.properties

-change-
-# px.clientId=CHANGE_ME
px.clientId=paxatapoc
-# px.pipeline.url=
px.pipeline.url=http://ip-192-168-1-26.ap-northeast-1.compute.internal:8090
-# px.library.url=
px.library.url=http://ip-192-168-1-26.ap-northeast-1.compute.internal:9080/library
-# Locales for installed localization
px.supported.locales=en,jp
-# Default encoding for the system
px.default.encoding=UTF-8
-# Web server port configuration
px.port=8000
px.port.redirect=false
-# mongo.hosts=127.0.0.1:27017
mongo.hosts=ip-192-168-1-26.ap-northeast-1.compute.internal:27017,ip-192-168-1-59.ap-northeast-1.compute.internal:27017,ip-192-168-1-37.ap-northeast-1.compute.internal:27017
-# Library Storage Configuration
px.library.storage.fs.type=simple
px.library.storage.fs.rootDirectory=/paxata
px.library.storage.fs.distribution=aws
px.library.storage.fs.resources=/usr/local/paxata/core-site.xml
px.library.storage.fs.user=paxata
px.temp.storage.fs.type=local
px.temp.storage.fs.rootDirectory=/usr/local/paxata/tmp

-/change-

cp /usr/local/paxata/server/config/px-library_jp.properties /usr/local/paxata/server/config/px-library_jp.properties_bk
vi /usr/local/paxata/server/config/px-library_jp.properties
-change-
export.datasource.title=データセットをエクスポートしています
-/change-
cp /usr/local/paxata/server/config/px-projects_jp.properties /usr/local/paxata/server/config/px-projects_jp.properties_bk
vi /usr/local/paxata/server/config/px-projects_jp.properties
-change-
project.editors.lookup.rightOption=選択したデータセットをルックアップに使用
-/change-

mkdir -p /usr/local/paxata/tmp
chown -R paxata:paxata /usr/local/paxata/tmp
chmod 775 /usr/local/paxata/tmp

vi /usr/local/paxata/core-site.xml
chown paxata:paxata /usr/local/paxata/core-site.xml
mkdir /usr/local/paxata/server/s3-buffer-dir
chown paxata:paxata /usr/local/paxata/server/s3-buffer-dir
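
A minimal sketch of what /usr/local/paxata/core-site.xml might contain for S3-backed library storage. The property names are standard Hadoop s3a settings and every value is a placeholder; the exact keys Paxata expects are not in this memo:

<?xml version="1.0"?>
<configuration>
  <!-- placeholder credentials: use the S3 key pair from the prepare step -->
  <property><name>fs.s3a.access.key</name><value>YOUR_ACCESS_KEY</value></property>
  <property><name>fs.s3a.secret.key</name><value>YOUR_SECRET_KEY</value></property>
  <!-- local buffer directory created above -->
  <property><name>fs.s3a.buffer.dir</name><value>/usr/local/paxata/server/s3-buffer-dir</value></property>
</configuration>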

su paxata
service paxata-server start

(if serving on port 80 without running as root)
(setcap 'cap_net_bind_service=+ep' /usr/java/jdk1.8.0_191/bin/java && echo '/usr/java/jdk1.8.0_191/lib/amd64/jli' > /etc/ld.so.conf.d/java.conf && ldconfig)
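
Background: setcap grants the java binary CAP_NET_BIND_SERVICE so it can bind ports below 1024 without running as root, and because capability-enabled binaries ignore LD_LIBRARY_PATH, the directory containing libjli.so must be registered system-wide via ld.so.conf.d, hence the ldconfig.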

-Connector-
unzip connector.zip
cd connector
curl -ksS -u "superuser:superuser" -X POST "http://localhost:8000/rest/connector/factories?pretty=true" -F "file=@connector-rest-api-2018.2.2.0.944.zip"
curl -ksS -u "superuser:superuser" -X POST "http://localhost:8000/rest/connector/factories?pretty=true" -F "file=@connector-azure-sql-dw-2018.2.3.0.1223.zip"
curl -ksS -u "superuser:superuser" -X POST "http://localhost:8000/rest/connector/factories?pretty=true" -F "file=@connector-cdh5-2018.2.3.0.1223-hdfs.zip"
curl -ksS -u "superuser:superuser" -X POST "http://localhost:8000/rest/connector/factories?pretty=true" -F "file=@connector-cdh5-2018.2.3.0.1223-hive.zip"
curl -ksS -u "superuser:superuser" -X POST "http://localhost:8000/rest/connector/factories?pretty=true" -F "file=@connector-cloudant-2018.2.3.0.1223.zip"
curl -ksS -u "superuser:superuser" -X POST "http://localhost:8000/rest/connector/factories?pretty=true" -F "file=@connector-dynamics-365-2018.2.3.0.1223.zip"
curl -ksS -u "superuser:superuser" -X POST "http://localhost:8000/rest/connector/factories?pretty=true" -F "file=@connector-dynamodb-2018.2.3.0.1223.zip"
curl -ksS -u "superuser:superuser" -X POST "http://localhost:8000/rest/connector/factories?pretty=true" -F "file=@connector-ftp-2018.2.3.0.1223.zip"
curl -ksS -u "superuser:superuser" -X POST "http://localhost:8000/rest/connector/factories?pretty=true" -F "file=@connector-gcs-2018.2.3.0.1223.zip"
curl -ksS -u "superuser:superuser" -X POST "http://localhost:8000/rest/connector/factories?pretty=true" -F "file=@connector-hdp2-2018.2.3.0.1223-hdfs.zip"
curl -ksS -u "superuser:superuser" -X POST "http://localhost:8000/rest/connector/factories?pretty=true" -F "file=@connector-hdp2-2018.2.3.0.1223-hive.zip"
curl -ksS -u "superuser:superuser" -X POST "http://localhost:8000/rest/connector/factories?pretty=true" -F "file=@connector-jdbc-2018.2.3.0.1223.zip"
curl -ksS -u "superuser:superuser" -X POST "http://localhost:8000/rest/connector/factories?pretty=true" -F "file=@connector-mstr-2018.2.3.0.1223.zip"
curl -ksS -u "superuser:superuser" -X POST "http://localhost:8000/rest/connector/factories?pretty=true" -F "file=@connector-redshift-2018.2.3.0.1223.zip"
curl -ksS -u "superuser:superuser" -X POST "http://localhost:8000/rest/connector/factories?pretty=true" -F "file=@connector-s3-2018.2.3.0.1223.zip"
curl -ksS -u "superuser:superuser" -X POST "http://localhost:8000/rest/connector/factories?pretty=true" -F "file=@connector-s3-hdfs-2018.2.3.0.1223.zip"
curl -ksS -u "superuser:superuser" -X POST "http://localhost:8000/rest/connector/factories?pretty=true" -F "file=@connector-salesforce-rest-2018.2.3.0.1223.zip"
curl -ksS -u "superuser:superuser" -X POST "http://localhost:8000/rest/connector/factories?pretty=true" -F "file=@connector-sharepoint-2018.2.3.0.1223.zip"
curl -ksS -u "superuser:superuser" -X POST "http://localhost:8000/rest/connector/factories?pretty=true" -F "file=@connector-smb-2018.2.3.0.1223.zip"
curl -ksS -u "superuser:superuser" -X POST "http://localhost:8000/rest/connector/factories?pretty=true" -F "file=@connector-snowflake-2018.2.3.0.1223.zip"
curl -ksS -u "superuser:superuser" -X POST "http://localhost:8000/rest/connector/factories?pretty=true" -F "file=@connector-tableau-2018.2.3.0.1223.zip"
curl -ksS -u "superuser:superuser" -X POST "http://localhost:8000/rest/connector/factories?pretty=true" -F "file=@connector-wasb-2018.2.3.0.1223.zip"
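
The uploads above can be scripted as one loop (same command, generalized; assumes all connector zips sit in the current directory):

for f in connector-*.zip; do
  curl -ksS -u "superuser:superuser" -X POST "http://localhost:8000/rest/connector/factories?pretty=true" -F "file=@$f"
done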

sudo vi /usr/local/paxata/server/drivers/jdbc-driver.properties
h2=org.h2.Driver
oracle=oracle.jdbc.OracleDriver
mysql=com.mysql.jdbc.Driver
postgres=org.postgresql.Driver
netezza=org.netezza.Driver
db2=com.ibm.db2.jcc.DB2Driver
composite=cs.jdbc.driver.CompositeDriver
teradata=com.teradata.jdbc.TeraDriver
sqlserver=com.microsoft.sqlserver.jdbc.SQLServerDriver
jtds=net.sourceforge.jtds.jdbc.Driver
sap=com.sap.db.jdbc.Driver

-jar driver-
Copy the driver jar to /usr/local/paxata/server/drivers/, then fix its ownership and permissions:
chown paxata:paxata YOURFILENAME.jar
(sudo chown paxata:paxata /usr/local/paxata/server/drivers/YOURFILENAME.jar)
chmod 755 YOURFILENAME.jar
(sudo chmod 755 /usr/local/paxata/server/drivers/YOURFILENAME.jar)
