mishell

ls -la     # long listing, including dotfiles
ls -ll     # -l given twice; equivalent to ls -l
ls -t      # sort by modification time, newest first
ls -all    # parsed as -a -l -l; equivalent to ls -la
rz -be     # lrzsz upload: -b binary mode, -e escape control characters
Generate an MD5 digest (see the echo -n ... | md5sum line below)
regexp_replace(hive_command, '--.*?\\r\\n', ' ')  -- Hive: strip "--" line comments ending in CRLF
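A minimal usage sketch, assuming a hypothetical table task_log with a hive_command column; the non-greedy Java regex removes each "--" comment up to its CRLF line ending:
SELECT regexp_replace(hive_command, '--.*?\\r\\n', ' ') AS cleaned_sql FROM task_log;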
hive
spark
hadoop
xkill
hadoop fs -ls /yarn
hadoop job -list
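Note: hadoop job has been deprecated since Hadoop 2; the current equivalent is:
mapred job -list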
less -M -N log.info
nautilus .
echo -n KJPNEOXTJQQA6JX3|md5sum
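The -n flag matters here: plain echo appends a trailing newline, so echo KJPNEOXTJQQA6JX3 | md5sum would hash one extra byte and produce a different digest.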
Check whether a file contains carriage returns (\r)
od -c filename
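In the od -c output, a carriage return shows up as \r. A quicker check that counts the lines containing a CR:
grep -c $'\r' filename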
du -h --max-depth=1
find / -name meta_env.sh 2>/dev/null
ln -s /home/rd/tools/infra-client/ ./
scp mi@10.231.53.161:/home/mi/文档/2021/log1.info ./
scp /home/rd/dac_table_2021-11-08-2037_back.sql Fujianming@10.231.53.131:   # note the trailing colon; without it scp writes a local file named after the host
scp liji@10.231.66.193:/home/liji/data/upsert.txt/upsert.txt ./liji/upsert.txt
mysql -h 10.174.223.42 -u governance_x -P 6887 -pNxNPjl0PqA23c1OooPkGERHabpXg2o
mysql -h10.38.131.146 -P3307 -umi -pvm0K12i9Y1zyIZJ9aJqBv
mysqldump -h 10.134.223.42 -u governance_x -p'NxNPjlooQ8PkGERHabpXg2o' -P 6887 --set-gtid-purged=OFF --routines --single-transaction --master-data=2 data_governance dac_table > dac_table_2021-11-08-2037_back.sql
mysql -h10.174.223.42 -P6887 -ugovernance_x -pNxNPjl0PqAabpXg2o data_governance < /home/rd/liji5/upsert.txt
mysql -h 10.162.41.1 -u data_mama_per_br -D data_permission -P 3306 -p
mysql -h 10.108.231.20 -u data_perm_x -D data_permission -P 3306 -p
mysql -h10.38.161.146 -umi -P 3307 -pvm0K12ioYhkyIZJ9aJqBv
mysql -h 10.114.9.44 -u dkolibre_binr -D data_kolibre_scheduler -P 3306 -p
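Note on -p: the mysql client only reads the password from an argument glued directly to -p (as in -pSecret). With a space in between, -p prompts interactively and the next word is parsed as the database name. The safer pattern is to omit the value and type it at the prompt, e.g.:
mysql -h 10.38.161.146 -P 3307 -u mi -p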
beeline -u 'jdbc:hive2://zjyprc-hadoop.spark-sql.hadoop.srv:10000/;principal=sql_prc/hadoop@XIAOMI.HADOOP?spark.yarn.queue=root.production.bigdata_group.data_paltform.onedata_dw_etl'
spark-shell --cluster zjyprc-hadoop-spark2.3 --conf spark.yarn.job.owners=wuyijie --master yarn --num-executors 500 --executor-cores 2 --driver-memory 8g --executor-memory 4g --queue root.production.bigdata_group.data_paltform.onedata_dw_etl
spark-shell --force-update --cluster zjyprc-hadoop-spark2.3 --master local[*] --principal s_dw_meta@xmfull.HADOOP --keytab /home/rd/keytab/s_dw_meta.keytab --queue root.production.bigdata_group.data_paltform.dw_business --driver-memory 5g --executor-memory 5g --conf spark.executor.memoryOverhead=1000m --conf spark.memory.offHeap.enabled=false --conf spark.memory.offHeap.size=5g --conf spark.memory.storageFraction=0.5 --conf spark.yarn.job.owners=liji5 --conf spark.shuffle.service.enabled=true --conf spark.sql.catalogImplementation=hive --conf spark.files.localize=hdfs://zjyprc-hadoop/spark/zjyprc-hadoop-spark2.3/cache/hive-site.xml --conf spark.kudu.endpoint=10.152.49.2:14000,10.152.49.11:14000,10.152.59.22:14000
spark-shell --force-update --cluster zjyprc-hadoop-spark3.1 --master local[*] --principal s_dw_meta@xmfull.HADOOP --keytab /home/rd/keytab/s_dw_meta.keytab --queue root.production.bigdata_group.data_paltform.dw_business --driver-memory 5g --executor-memory 5g --conf spark.executor.memoryOverhead=1000m --conf spark.sql.catalogImplementation=hive --conf spark.security.credentials.hive.enabled=true --conf spark.kudu.endpoint=10.152.49.2:14000,10.152.49.21:14000,10.152.49.22:14000
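Note: --cluster and --force-update are not vanilla spark-shell options; they appear to come from the in-house infra-client wrapper symlinked above.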
curl -X GET -H 'Authorization:service-meta-data;22a6642e47a9e965a584f04fe044b1ee;22a6642e47a9e965a584f04fe044b1ee' http://production-host/auth/v1/cost/hadoop
curl -H 'x-ssl-client-verify:SUCCESS' -H 'Authorization:cloud-manager/1.0 {"identifier": {"id": 5421}}' -X POST -k "http://c4-hadoop-tst-st15.bj:1500/test-tiering-api?xxx"

Finding the path a process was started from via the ps command
1. ps -ef | grep <keyword>  to find the PID
2. netstat -anp | grep <PID>  to see which ports the process has opened
3. The key step:
pwdx <PID>  prints the directory the process was started from
Alternatively, lsof -p <PID> | grep cwd achieves the same effect (see the worked example below)
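A worked example (the nginx keyword and PID 12345 are illustrative):
ps -ef | grep nginx          # suppose this shows PID 12345
netstat -anp | grep 12345    # ports bound by that PID
pwdx 12345                   # -> 12345: /usr/local/nginx
lsof -p 12345 | grep cwd     # the same directory via lsof
readlink /proc/12345/exe     # the binary itself, if the executable path is needed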

posted on 2021-12-06 20:36 泳之
