Monday, December 2, 2013

Cleanup /tmp directory automatically

On Debian/Ubuntu the TMPTIME setting in /etc/default/rcS controls the cleanup of /tmp: at boot, files older than TMPTIME days are removed (0 removes everything regardless of age). To keep one day's worth of files and clean anything older, flip the value with a targeted sed rather than a blanket 0-to-1 substitution:

sed -i 's/^TMPTIME=0/TMPTIME=1/' /etc/default/rcS
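A quick way to confirm the change took effect (assuming the stock Debian/Ubuntu /etc/default/rcS layout):

grep ^TMPTIME /etc/default/rcS
# expected after the edit: TMPTIME=1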

Saturday, October 26, 2013

Customized zabbix monitoring

# Customized disk i/o monitoring
UserParameter=custom.vfs.dev.read.ops[*],cat /proc/diskstats | grep $1 | head -1 | awk '{print $$4}'
UserParameter=custom.vfs.dev.read.ms[*],cat /proc/diskstats | grep $1 | head -1 | awk '{print $$7}'
UserParameter=custom.vfs.dev.write.ops[*],cat /proc/diskstats | grep $1 | head -1 | awk '{print $$8}'
UserParameter=custom.vfs.dev.write.ms[*],cat /proc/diskstats | grep $1 | head -1 | awk '{print $$11}'
UserParameter=custom.vfs.dev.io.active[*],cat /proc/diskstats | grep $1 | head -1 | awk '{print $$12}'
UserParameter=custom.vfs.dev.io.ms[*],cat /proc/diskstats | grep $1 | head -1 | awk '{print $$13}'
UserParameter=custom.vfs.dev.read.sectors[*],cat /proc/diskstats | grep $1 | head -1 | awk '{print $$6}'
UserParameter=custom.vfs.dev.write.sectors[*],cat /proc/diskstats | grep $1 | head -1 | awk '{print $$10}'
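To confirm a key works before wiring it into a template, it can be queried through the agent. The key names come from the lines above; the device name sda and the host name are only placeholders:

# test locally on the monitored host
zabbix_agentd -t 'custom.vfs.dev.read.ops[sda]'
# or query it from the Zabbix server/proxy
zabbix_get -s monitored-host.example.com -k 'custom.vfs.dev.read.ops[sda]'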

# Customized MySQL monitoring
UserParameter=mysql.daily,php /etc/zabbix/scripts/mysql.php live zabbix xxxxxxxxx
UserParameter=mysql.live,php /etc/zabbix/scripts/mysql.php live zabbix   xxxxxxxx

# Pacemaker monitoring
UserParameter=pacemaker.status, sudo /usr/sbin/crm_mon -s |cut -f2 -d:
UserParameter=pacemaker-ipresource.status, sudo /usr/sbin/crm_mon -1 |grep "heartbeat:IPaddr2" |awk '{print $3}'
UserParameter=pacemaker-iparpresource.status, sudo /usr/sbin/crm_mon -1 |grep "heartbeat:SendArp" |awk '{print $3}'

UserParameter=pacemaker-vip.master, sudo /usr/sbin/crm_mon -1 |grep "heartbeat:IPaddr2" |awk '{print $4}'
UserParameter=pacemaker-arp.master, sudo /usr/sbin/crm_mon -1 |grep "heartbeat:SendArp" |awk '{print $4}'

UserParameter=pacemaker.ha.failchk, /usr/bin/sudo /usr/sbin/crm_mon -1| /bin/egrep -i 'Failed actions'
UserParameter=pacemaker.ha.onlinechk, /usr/bin/sudo /usr/sbin/crm_mon -1| /bin/egrep -i 'offline'
UserParameter=pacemaker.ha.cleanchk, /usr/bin/sudo /usr/sbin/crm_mon -1| /bin/egrep -i 'UNCLEAN'
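These checks call crm_mon through sudo, so the agent user needs a passwordless sudo rule. A minimal sketch, assuming the agent runs as the zabbix user and crm_mon lives in /usr/sbin:

# /etc/sudoers.d/zabbix  (edit with: visudo -f /etc/sudoers.d/zabbix)
zabbix ALL=(ALL) NOPASSWD: /usr/sbin/crm_mon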

UserParameter=redis_stats[*],/etc/zabbix/scripts/redis.pl $1 $2 $3

Website monitoring

# mysite monitoring
UserParameter=mysite.check,wget --no-check-certificate -O- https://mysite.com/login
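The item above returns the whole page body. A stricter variant that hands Zabbix a plain number is sketched below; matching on the word "Login" is only an assumption about what the page contains:

UserParameter=mysite.check.count,wget -q --no-check-certificate -O- https://mysite.com/login | grep -c 'Login'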

Local health check

# myadmin health monitoring
UserParameter=app.myadmin.health, /usr/bin/w3m -dump -no-cookie http://localhost:8080/health | sed 's///g' | grep -wc 'appStatus=\"1\"'


Mongo Monitoring

UserParameter=mongo.uptime, /etc/zabbix/scripts/mongo_plugin.py serverStatus uptime
UserParameter=mongo.current.connections, /etc/zabbix/scripts/mongo_plugin.py serverStatus connections current
UserParameter=mongo.available.connections, /etc/zabbix/scripts/mongo_plugin.py serverStatus connections available


# Custom LDAP monitoring
UserParameter=ldap.search.status, /usr/bin/ldapsearch -x -b '' -s base '(objectclass=*)' namingContexts | grep -wc "Success"


Mysql-Slave monitoring

# MySQL Slave Process monitoring
UserParameter=mysql.ha.Slave_IO_Running, /usr/bin/mysql -h "localhost" -u "zabbix" -p"xxxxxxx" "mysql" -se  "show slave status\G" | grep Slave_IO_Running | awk '{print $2}'
UserParameter=mysql.ha.Last_SQL_Errno, /usr/bin/mysql -h "localhost" -u "zabbix" -p"xxxxxxx" "mysql" -se  "show slave status\G" | grep Last_SQL_Errno | awk '{print $2}'
UserParameter=mysql.ha.Seconds_Behind_Master, /usr/bin/mysql -h "localhost" -u "zabbix" -p"xxxxxxx" "mysql" -se  "show slave status\G" | grep Seconds_Behind_Master | awk '{print $2}'
UserParameter=mysql.ha.Slave_SQL_Running, /usr/bin/mysql -h "localhost" -u "zabbix" -p"xxxxxxx" "mysql" -se  "show slave status\G" | grep Slave_SQL_Running | awk '{print $2}'
UserParameter=mysql.ha.Last_Errno, /usr/bin/mysql -h "localhost" -u "zabbix" -p"xxxxxxx" "mysql" -se  "show slave status\G" | grep Last_Errno | awk '{print $2}'
UserParameter=mysql.ha.Last_IO_Errno, /usr/bin/mysql -h "localhost" -u "zabbix" -p"xxxxx" "mysql" -se  "show slave status\G" | grep Last_IO_Errno | awk '{print $2}'


#monitor SSL cert

UserParameter=cert_check[*],/etc/zabbix/externalscripts/ssl_check.sh $1

(You have to create a template in Zabbix based on these items.)
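The external script behind cert_check[*] is not shown here; a minimal sketch of what /etc/zabbix/externalscripts/ssl_check.sh could look like (hypothetical implementation that prints the number of days left until the HTTPS certificate expires):

#!/bin/bash
# usage: ssl_check.sh <hostname>  - print days until the certificate expires
HOST=$1
ENDDATE=$(echo | openssl s_client -servername "$HOST" -connect "$HOST":443 2>/dev/null \
          | openssl x509 -noout -enddate | cut -d= -f2)
echo $(( ( $(date -d "$ENDDATE" +%s) - $(date +%s) ) / 86400 ))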

Thursday, October 17, 2013

Cannot find device br0 / failed to bring up br0

Make sure you have bridge-utils package installed.

apt-get install bridge-utils
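If the bridge still does not come up, check that it is actually defined. A minimal /etc/network/interfaces stanza might look like this (the physical interface name eth0 is an assumption):

auto br0
iface br0 inet dhcp
    bridge_ports eth0
    bridge_stp off
    bridge_fd 0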

Sunday, October 6, 2013

Import a VM in KVM

Virt-Install

virt-install --name vmhost9-vm01 --ram 8192 --vcpus=4 --disk path=/var/lib/libvirt/images/vmhost9-vm01.img,size=20 -v --accelerate --os-type=linux --keymap=en-us --network=bridge:br0 --import
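After the import you can confirm the guest exists and attach to its console; the guest name is the one used in the command above:

virsh list --all
virsh console vmhost9-vm01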

Sunday, August 18, 2013

tcpdump for X-Forwarded-For header

How to take a tcpdump for a service running on port 4002:

#tcpdump -vvvs 1024 -l -A -w /tmp/web-1.pcap tcp port 4002
or capture on a specific network interface and port:
tcpdump -i eth0.12 -s0 -w /tmp/web-1.pcap port 4002
How to read the capture:
tcpdump -X -vv -r web-1.pcap
or
use wireshark
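Since the point here is the X-Forwarded-For header, a quick way to pull it out of the saved capture (file name taken from the example above):

tcpdump -A -s0 -r /tmp/web-1.pcap | grep -i 'x-forwarded-for'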

Friday, August 16, 2013

MySQL dump and restore on a remote machine

SourceHost

#mysqldump --opt --skip-lock-tables -umyuser -pmy-secret-password teamcity --extended-insert --routines --add-drop-table | mysql -ubackup -pbackup -hremotehost.myodmain.com teamcity
If you want to reset the root password:
mysql --defaults-file=/etc/mysql/debian.cnf
update mysql.user set password=PASSWORD('password') where user='root';
flush privileges;

Wednesday, July 17, 2013

mount

# df
# cat /etc/fstab
# mount -a
# umount -a
 
sudo fdisk -l
 
Find the newly added hard disk.
 
To create the partition on the second hard drive:
 
sudo fdisk /dev/sdx
u    (toggle the display/entry units)
n    (create a new partition)
p    (make it a primary partition)
w    (write the partition table and exit)
 
Now we need to format our newly created partition using the following command:
 
sudo mkfs /dev/sdb1 -t ext4
 
verify
 
sudo fdisk -l
 
Mount the newly created sdb1 partition on a directory of your choice, for example under /mnt:
 
cd /mnt/
sudo mkdir 2ndHDD
sudo chmod 0777 2ndHDD
ls -l
 
sudo mount /dev/sdb1 /mnt/2ndHDD -t ext4
 
# re-apply permissions now that the filesystem itself is mounted
sudo chmod 0777 2ndHDD
 
touch /mnt/2ndHDD/test.txt
ls /mnt/2ndHDD/*
 
Or mount it on a permanent location and add it to /etc/fstab:

mount -t ext4 -v /dev/sdb1 /opt/repository/myDir
sudo vim /etc/fstab

/dev/sdb1    /opt/repository/myDir    ext4        defaults  0    1

sudo mount -a
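To confirm the mount came up from the fstab entry (paths taken from the line above):

df -h /opt/repository/myDir
mount | grep /dev/sdb1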

Thursday, May 30, 2013

Vagrant: * Unknown configuration section 'berkshelf'.

The Berkshelf Vagrant plugin has been renamed by its maintainer; see
https://github.com/RiotGames/vagrant-berkshelf

Users using the old command will face the following error when
executing "vagrant up":

Vagrant:
* Unknown configuration section 'berkshelf'.
 
Solution:
 
sudo vagrant plugin install vagrant-berkshelf
 
vagrant up
 
 

Friday, May 10, 2013

File Share using Apache



Apache is pretty useful for sharing files on a network, particularly on a LAN. Here is how to map an external folder to a URL on your Apache server. Let's assume your home directory is ABC, the path of the folder you want to share is /var/log/weblogs, and you want to access the contents of this folder at the URL /weblogs.

Change directory to /etc/apache2/sites-enabled and create a file named downloads with the following contents.

Alias "/weblogs" "/var/log/weblogs/"

<Directory "/var/log/weblogs/">
     AllowOverride None
     Options Indexes
     Order allow,deny
     Allow from all
</Directory>

Save the file and close it. Now restart Apache by issuing the following command.
sudo apache2ctl restart
Now test that everything is working as expected by going to the following URL in your browser.
http://localhost/weblogs
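A syntax check and a quick non-browser test from the shell (configtest validates the configuration before or after a restart):

sudo apache2ctl configtest
curl -s http://localhost/weblogs/ | head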

Wednesday, May 1, 2013

Reset forgotten mysql password


This works for Ubuntu/Debian only, not for Red Hat/CentOS.

# /etc/init.d/mysql stop
# mysqld_safe --skip-grant-tables &
$ mysql -u root

mysql> use mysql;
mysql> update user set password=PASSWORD("password") where User='root';
mysql> flush privileges;
mysql> quit

# /etc/init.d/mysql stop
# /etc/init.d/mysql start
$ mysql -u root -p
 
Take Full backup of Mysql
mysqldump -u root -ppassword --all-databases > /tmp/all-database.sql 
Restore DB
mysql -u root -ppassword < /tmp/all-database.sql
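The same backup and restore, compressed on the fly (the file name is just an example):

mysqldump -u root -ppassword --all-databases | gzip > /tmp/all-database.sql.gz
gunzip < /tmp/all-database.sql.gz | mysql -u root -ppassword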

Saturday, April 20, 2013

IP Tables Port Redirection

PublicIP(NAT BOX):5001

PrivateIP:5001

iptables -t nat -I PREROUTING --src 0/0 --dst PrivateIP -p tcp --dport 5001 -j REDIRECT --to-ports 5001

iptables -I FORWARD -p tcp -d PrivateIP --dport 5001 -i eth0 -j ACCEPT
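To list the NAT and FORWARD rules and confirm they landed (note these rules are not persistent across a reboot unless saved):

iptables -t nat -L PREROUTING -n --line-numbers
iptables -L FORWARD -n --line-numbers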

Friday, April 12, 2013

SBT Launcher

Copying the launcher into the bin dir and creating the script

#!/bin/sh
java -Xmx256M -jar `dirname $0`/sbt-launcher.jar "$@"
OR
A better script for the launcher, which also resolves a symlink to the real location:
FILE_NAME=$0
if [ -h "$FILE_NAME" ]
  then
    FILE_NAME=`readlink $0`
fi
java -Xmx512M -jar `dirname $FILE_NAME`/sbt-launch-0.7.5.jar "$@"
so the sbt file can be placed on any path:
mkdir /opt/sbt
cd /opt/sbt
wget  http://simple-build-tool.googlecode.com/files/sbt-launch-0.7.5.jar

create a file and add the script above

vim sbt

chmod 755 sbt
ln -s /opt/sbt/sbt /usr/bin/sbt
 

Saturday, March 9, 2013

sshpass for remote command execution

sshpass is a utility designed for running ssh using the mode referred to as "keyboard-interactive" password authentication, but in non-interactive mode.

#sudo apt-get install sshpass
Log in to a remote machine
$ sshpass -p 'your_secret_Pass' ssh username@server.yourDomain.com

Create a script to run the command on multiple machines

root@myMachine:/var/local/scripts# cat remote_cmd_script.sh
#!/bin/bash
# runs the command passed as $1 on every host listed in machineListFile.txt
LIST=`/bin/cat /var/local/scripts/machineListFile.txt`
for i in $LIST
do
 echo $i
 sshpass -p 'xxxxx' ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@$i.myDomain.com "$1"
 echo " "
done

Run the command from the terminal:

#./remote_cmd_script.sh "ps aux | grep mysql"
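machineListFile.txt is simply one short hostname per line (the names below are hypothetical); the script appends .myDomain.com itself:

# /var/local/scripts/machineListFile.txt
web01
web02
db01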

Wednesday, February 27, 2013

Install TEAMCITY Server on Ubuntu 12.04 LTS

Install teamcity on Ubuntu 12.04 LTS

#adduser --system --shell /bin/bash --gecos 'TeamCity Build Control' --group --disabled-password --home /opt/teamcity teamcity
wget http://download.jetbrains.com/teamcity/TeamCity-7.1.4.tar.gz
tar -xzvf TeamCity-7.1.4.tar.gz
Ensure you have JRE or JDK installed and JAVA_HOME environment variable is pointing to the Java installation directory. Latest Oracle Java 1.6 update is recommended.

teamcity@teamcity:~/TeamCity$ which java
teamcity@teamcity:~/TeamCity$

// nothing comes back :) - go to the Oracle site and download JDK 1.6 (not 1.7); please do not install OpenJDK

http://www.oracle.com/technetwork/java/javase/downloads/index.html
Select the version of Java you want to install.
In our case we will install Java SE 6 Update 41 (JDK), though a JRE is fine for the TeamCity server itself (agents need a JDK).

chmod +x jdk-6u41-linux-x64.bin
root@teamcity:/var/local# ./jdk-6u41-linux-x64.bin

root@teamcity:/var/local# ls
jdk1.6.0_41  jdk-6u41-linux-x64.bin
root@teamcity:/var/local#
sudo mkdir -p /usr/lib/jvm/
sudo mv jdk1.6.0_41 /usr/lib/jvm/
sudo update-alternatives --install "/usr/bin/java" "java" "/usr/lib/jvm/jdk1.6.0_41/bin/java" 1
sudo update-alternatives --install "/usr/bin/javac" "javac" "/usr/lib/jvm/jdk1.6.0_41/bin/javac" 1

root@teamcity:/usr/lib/jvm# which java
/usr/bin/java
root@teamcity:/usr/lib/jvm# java -version
java version "1.6.0_41"
Java(TM) SE Runtime Environment (build 1.6.0_41-b02)
Java HotSpot(TM) 64-Bit Server VM (build 20.14-b01, mixed mode)
root@teamcity:/usr/lib/jvm#

If you have multiple versions of Java installed, you can choose the one you want to use for your application:
sudo update-alternatives --config java

Setting up the Database

sudo apt-get install mysql-server
root@teamcity:/usr/lib/jvm# mysql -uroot -p
Enter password:
Welcome to the MySQL monitor.  Commands end with ; or \g.

Installation:



Create an empty database for TeamCity in MySQL and grant permission to modify this database to the user that TeamCity will use to connect to it.
mysql> create database teamcity;
mysql> create user 'tcclient'@'localhost' identified by 'your_password_here';
mysql> grant all privileges on teamcity.* to 'tcclient'@'localhost' with grant option;
mysql> exit;
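A quick sanity check that the new account can actually reach the database before pointing TeamCity at it (the password is the placeholder from above):

mysql -u tcclient -p'your_password_here' -h localhost -e 'show tables;' teamcity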



http://hostname:8111/mnt

Please review the settings below before proceeding with the first TeamCity start.

TeamCity stores configuration settings on disk in a Data Directory which is empty or does not contain TeamCity settings right now.

Location of the Data Directory: /opt/teamcity/.BuildServer

Proceed to use the directory and start with a fresh installation.

Press "Proceed".
It will create the database schema and ask you to accept the license agreement.
Then create a user account individually or configure LDAP.






It will create a .BuildServer directory under the teamcity user's home directory.
drwxrwxr-x  6 teamcity teamcity 4096 Feb 27 18:16 .BuildServer

drwxr-xr-x 12 teamcity teamcity 4096 Feb 27 18:14 TeamCity
-rw-------  1 teamcity teamcity  849 Feb 27 18:12 .viminfo

teamcity@teamcity:~/.BuildServer/config$ cp -a database.mysql.properties.dist database.properties
teamcity@teamcity:~/.BuildServer/config$ vim database.properties

# Database: MySQL 

connectionUrl=jdbc:mysql://localhost:3306/teamcity
connectionProperties.user=tcclient
connectionProperties.password=xxxxxxxxxx



TeamCity Data Directory is the directory on the file system used by TeamCity server to store configuration settings, build results and current operation files. The directory is the primary storage for all the configuration settings and holds the data critical to the TeamCity installation.

Download the MySQL JDBC driver from http://dev.mysql.com/downloads/connector/j/.
Install MySQL connector driver jar (mysql-connector-java-*-bin.jar from the downloaded archive).
tar -xvf mysql-connector-java-5.1.22.tar


You will need to download the Java driver and put it into .BuildServer/lib/jdbc directory (create it if necessary).
teamcity@teamcity:~/.BuildServer/lib/jdbc$ ls
mysql-connector-java-5.1.22-bin.jar


If you want to connect the TeamCity server to an LDAP server, use the file ldap-config.properties, which lives under:
teamcity@teamcity:~/.BuildServer/config$


main-config.xml Configuration
Configure the login-module settings of ~/.BuildServer/config/main-config.xml as needed; the default configuration shown there includes the welcome text "Welcome to TeamCity, your team building environment!".

=========Performance===========
If you need a high-performing TeamCity environment, you may want to look at a few things:

- Use an external database (MySQL) with InnoDB as the storage engine.
- TeamCity is I/O intensive, so high-speed disk drives (e.g. 15K RPM SAS or SSD) are good for performance.
- Separate the OS and the TeamCity installation onto different drives. RAID 1 is pretty good.
- Multiple CPUs are a good bet, as TeamCity is multi-threaded.
- RAM: 32-64 GB. The more tests you run per build, the more RAM you will need (see the memory-settings example after this list).
- Git is more memory intensive on the server side than SVN.
- Gigabit LAN is a plus :)
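As a sketch of the memory settings mentioned above, TeamCity's bundled start-up script reads the TEAMCITY_SERVER_MEM_OPTS environment variable; the values here are purely illustrative, not a recommendation:

export TEAMCITY_SERVER_MEM_OPTS="-Xms750m -Xmx2048m -XX:MaxPermSize=270m"
/opt/teamcity/TeamCity/bin/teamcity-server.sh start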


Sizing the Generations
  • The -Xmx value determines the size of the heap to reserve at JVM initialization.
  • The -Xms value is the space in memory that is committed to the VM at init. The JVM can grow to the size of -Xmx.
  • The difference between -Xmx and -Xms is virtual memory (virtually committed)
Total Heap
  • Total available memory is the most important factor affecting GC performance
  • By default the JVM grows or shrinks the heap at each GC to keep the ratio of free space to live objects at each collection within a specified range.
    • -XX:MinHeapFreeRatio - when the percentage of free space in a generation falls below this value the generation will be expanded to meet this percentage. Default is 40
    • -XX:MaxHeapFreeRatio - when the percentage of free space in a generation exceeded this value the generation will shrink to meet this value. Default is 70
  • For server applications
    • Unless you have problems with pauses grant as much memory as possible to the JVM
    • Set -Xms and -Xmx close to each other or equal for a faster startup (removes constant resizing of JVM). But if you make a poor choice the JVM can't compensate for it.
    • Increase memory as you increase the number of processors, because memory allocation can be parallelized.
  • Types of Collectors
    • Everything to this point talks about the default garbage collector, there are other GC's you can use
    • Throughput Collector - Uses a parallel version of the young generation collector
      • -XX:+UseParallelGC
      • Tenured collector is the same as in default
    • Concurrent Low Pause Collector
      • Collects tenured collection concurrently with the execution of the app.
      • The app is paused for short periods during collection
      • -XX:+UseConcMarkSweepGC
      • To enable a parallel young generation GC with the concurrent GC add -XX:+UseParNewGC to the startup. Don't add -XX:+UseParallelGC with this option.
    • Incremental Low Pause Collector
      • Sometimes called Train Collector
      • Collects a portion of the tenured generation at each minor collection.
      • Tries to minimize large pause of major collections
      • Slower than the default collector when considering overall throughput
      • Good for client apps (my observation)
      • -Xincgc
    • Don't mix these options, JVM may not behave as expected
  •  Measurements with the Incremental Collector
    • -verbose:gc and -XX:+PrintGCDetails
  • If the garbage collector has become a bottleneck, you may wish to customize the generation sizes. Check the verbose garbage collector output, and then explore the sensitivity of your individual performance metric to the garbage collector parameters. (A combined example of these flags follows this list.)
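A minimal sketch combining some of the flags discussed above; the heap sizes and the jar name are placeholders:

java -Xms2g -Xmx2g \
     -XX:+UseConcMarkSweepGC -XX:+UseParNewGC \
     -verbose:gc -XX:+PrintGCDetails \
     -jar your-app.jar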






===============================================
http://www.delcomproducts.com/products_usblmp.asp

CCTRAY API:
http://tc.loyal3.com/httpAuth/app/rest/cctray/projects.xml

DOCS:
http://confluence.jetbrains.com/display/TW/REST+API+Plugin#RESTAPIPlugin-BuildStatusIcon

Tricks


File searching/manipulation

Copy all files inc subdirs:
cp -a /usr/local/foo/* /var/temp/bar
Delete all files inc subdirs:
rm -rf folder
SCP a folder including sub-dirs:
scp -r folder/ 1.1.1.1:
Find files with indexer:
updatedb
Locate filename
Find files without indexer:
find /dir -name filename
find /dir -name '*part of file name*'
Delete everything older than 7 days:
find /directoryname -type f -mtime +7 -exec rm {} \;
Search text within files and print the lines:
find /dir -type f -exec grep "textinfile" {} \;
Search text within files and print only the filenames:
find /dir -type f | xargs grep -li "textinfile"
Search and replace over multiple files:
perl -pi -w -e 's/old/new/g;' *.php
Show files accessed this year:
ls -Rlua /dir|grep -v '\.$'|grep `date "+%Y"`
Du for each folder without showing subdirs:
for i in `find . -maxdepth 1 -type d`; do du -sh $i; done;
Remove duplicate files [1]
The script below will find duplicate files (files with the same md5sum) in a specified directory and output a new shell script containing commented-out rm statements for deleting them. You can then edit this output to decide which to keep.
OUTF=rem-duplicates.sh;
echo "#! /bin/sh" > $OUTF;
find "$@" -type f -print0 |
  xargs -0 -n1 md5sum |
    sort --key=1,32 | uniq -w 32 -d --all-repeated=separate |
    sed -r 's/^[0-9a-f]*( )*//;s/([^a-zA-Z0-9./_-])/\\\1/g;s/(.+)/#rm \1/' >> $OUTF;
chmod a+x $OUTF; ls -l $OUTF
Networking
Show listening ports and the processes using the ports:
sudo netstat -ltnup
Ascertain which line in the routing table a particular destination IP uses:
/sbin/ip ro get 1.1.1.1
View routing table:
route -n
Add static route:
sudo ip route add networkaddress/cidr via next_hop_ip
Show processes sorted by memory usage descending:
ps -e -orss=,args= | sort -b -k1,1n | pr -TW$COLUMNS
Text Manipulation
Strip out a single character from text:
Strip colons from a MAC address (tr works character by character, so it cannot be used to strip a whole phrase):
echo 00:00:00:00:00:00 | tr -d ':'
000000000000
Strip out the phrase 'remove me' from text:
cat file|sed 's/remove me//g'
Delete the first character of every line:
cat file|sed 's/^.//'
Find words in garbled text [2]
echo "Garbled Text" | grep -o -F -f /usr/share/dict/words | sed -e "/^.$/d"
Get columns 1 and 3 and separate them with commas:
cat file | awk -v OFS=',' '{print $1, $3}'
Set up a portable prompt with user@host:pwd on systems that have an old prompt by default, like some BSD machines
PS1='\u@\h:\w\$ '
Add to bashrc for a scpnewestfile user@host:/dir command to automatically scp the most recently modified file to another host:
scpnewestfile()
{
scp "`ls -tr | tail -1`" $1
}
Archiving
Create tar archive
tar -cvzf files.tar.gz file1 file2 file3 filen
Create bz2 archive of dir/
tar -c dir/ | bzip2 > dir.tar.bz2
Extract archive
bzip2 -dc dir.tar.bz2|tar -x
Copy dir/ with permissions to remote machine
( tar -c /dir/to/copy ) | ssh -C user@remote 'cd /where/to/ && tar -x -p'
Case-insensitive search in less: type -i to toggle ignoring case, then search as normal:
/normal search text here
/normal search text here/i
Redirecting Output
Redirect standard output and standard error to a file:
command >file 2>&1
Pipe standard output and standard error: tee in this example
script 2>&1 | tee file
SSH
SSH Auto complete:
SSH_COMPLETE=( $(cat ~/.ssh/known_hosts | \
                 cut -f 1 -d ' ' | \
                 sed -e s/,.*//g | \
                 uniq | \
                 egrep -v [0123456789]) )
complete -o default -W "${SSH_COMPLETE[*]}" ssh
RPM
Query all installed packages, similar to yum list installed
rpm -qa
Query package owning a file
rpm -qf /bin/file
List files installed by a package
rpm -ql package
List config files installed by a package
rpm -qc package
List status of files installed by a package
rpm -qs package
Aptitude
Search for packages of a given name or description:

apt-cache search virtualbox

Search for packages that contain a certain file

apt-file search filename
Auto install security updates
The unattended-upgrades package allows this; it is installed by default but not scheduled to run automatically.
Install security updates only:
sudo unattended-upgrade
Configure it to download/install security updates automatically, excluding any that have additional dependencies:
sudo dpkg-reconfigure -plow unattended-upgrades
Vim
Search and replace
:%s/OLD/NEW/g
Delete all occurrences of a string
:g/STRING/d
Delete all empty lines
:g/^$/d
Delete all lines with only spaces
:g/^ *$/d
Delete all duplicate lines
:sort u
Jump to beginning / end of file
'G' alone moves the cursor to the end of file; 'gg' to the beginning of the file.
Show line numbers
:set number
Hide line numbers
:set nonumber
Goto line number 20
:20
Cut/Delete a whole line
To cut the current line:
dd
To cut 10 lines below the cursor:
10dd
Copy/Yank many lines
To copy the current line:
Y

To copy 10 lines below the cursor:
10Y

To copy all lines below the cursor:
yG
Paste a whole line
The data from this line is placed in a local clipboard called a register and can be restored with 'p' (below cursor) or 'P' (above cursor).
Insert text at the beginning of each line
Insert a double quote " at the beginning of each line:
:%s!^!"!
Insert text at the beginning of all lines below the cursor
Useful for commenting out text in bulk in Apache, insert a # at the beginning of all lines below the cursor:
:.,$s!^!#!
. means the current line, the comma is the range delimiter, $ refers to the last line (EOF), and s is the substitute command
Show Clipboard items / registers
:reg
Paste from register number 2
"2p
Allow pasting in Vi without Indents
On earlier distributions pasting in Vi from the OS can be tricky as indents are placed on the lines. To disable this:
:set paste
Create a new line below the current line

o

Append text at the end of the current line

A
Visual mode commands
Whilst in command mode, press v to enter visual mode. The cursor keys can now be used to select text. the following commands will affect the text that has been selected. Copy/Yank and Cut/Delete can also be used with visual mode.
Indent text
>
De-Indent text
<
Sort text
To pipe any selected text into an external command and replace with its output:
:! command
Use sort:
:! sort
Using external commands
Vim can run an external command with the current file. The file saved on disk or the current unsaved buffer can be used: % File on disk - Current buffer
Diff current unsaved buffer with saved version
:w !diff % -
Get number of lines through wc
:w !wc -
Syntax check of PHP
:w !php5 -l -
Recall previous external command
Same as Bash: :!!
Insert command output
:r !echo hello
Settings commands by default
If a particular option or command needs to be the default in Vim (i.e. it needs to be run every time you start Vim), create a text file called .exrc or .vimrc in your home directory and place your commands in there without the preceding colon. For example, to display line numbers and search case-insensitively by default, your .vimrc would look like this:
set number
set ignorecase
Viewing panes
To split the current file into two horizontal panes: :split Use Ctrl+W to switch panes.
Recall previous command
Press Esc to enter command mode, type a single colon, after which the up and down keys can be used in the same manner as BASH.
Redo last typed text
Type a set of text in INSERT mode and go into COMMAND mode by pressing Esc. Now pressing . (a dot/period) will re-type the text typed in.
Tabs
Vim can open many files at once and arrange them in tabs:
vim -p blah.csv blah2.csv
To move between tabs use :tabn and :tabp
Run a command on all files/tabs open
:tabdo %s/replace/this/g
To force a user to change their password upon their next login:
chage -d 0 user
Delete occurrences of a string
awk '{ gsub(/string/,""); print }'
Replace text1 with text2
awk '{ gsub(/text1/,"text2"); print }'
Replace text1 with text2 only on lines that contain Hello
awk '/Hello/ { gsub(/text1/,"text2"); print }'
Replace text1 with text2 only on lines that do NOT contain Hello
awk '!/Hello/ { gsub(/text1/,"text2"); print }'
Swap first field with the second field
awk '{ temp = $1; $1 = $2; $2 = temp; print }'
Delete the second field
awk '{ $2 = ""; print }'
Remove duplicate, nonconsecutive lines
awk '!a[$0]++'
Show lines based on field-criteria
Show processes in uninterruptible sleep (usually IO):
ps aux|awk '{if ($8 == "D") print}'
The right way to logrotate - keep 14 days with helpful dates so the filenames of your logs do not change every day:
/var/log/app/app.log /var/log/app/app_something.log {
    rotate 14
    daily
    compress
    missingok
    notifempty
    sharedscripts
    dateext
    postrotate
        /bin/kill -HUP `cat /var/run/app.pid 2>/dev/null` 2>/dev/null || true
    endscript
}
/usr/sbin/logrotate -d /etc/logrotate.conf
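To exercise just this stanza, debug it and then force one rotation to confirm the postrotate HUP fires (the config path is an assumption; point it at wherever the stanza above was saved):

/usr/sbin/logrotate -d /etc/logrotate.d/app
/usr/sbin/logrotate -f /etc/logrotate.d/app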
Max file descriptor limit (Linux)
Get the current limit:
$ cat /proc/sys/fs/file-max
205076
Get the current number of open files:
lsof|wc -l
Increase the max file descriptor limit
Edit /etc/sysctl.conf:
fs.file-max = 331287
Scan for SSH servers in a subnet whilst outputting the version string in a format that is easily greppable or parsed into a script of some kind:
nmap -p22 --open -PN -sV -oG ssh_hosts 10.1.100.0/24
-PN tells nmap not to try to ping the host first. If it does ping, it can report a host as down when it is in fact up.
top
Shows continuously-updating information for processes. Processes are sorted in CPU usage descending by default.
Load averages are shown for the last 1, 5 and 15 minutes. Every system is different but as a guide a single CPU system should normally not have a 15 minute load average over 2.
Interactive commands
Command   Function
A         Displays 4 windows: Window 1 sorted by CPU descending; Window 2 sorted by PID descending (shows newest processes); Window 3 sorted by memory usage descending; Window 4 shows your processes.
Space     Update now
P         Sort by CPU usage descending
M         Sort by memory usage descending
T         Sort by cumulative time
f         Show/hide fields
o         Change column order
1         Show individual CPUs
c         Toggle show command-lines
CPU Usage summary fields
us = user mode
sy = system mode
ni = low priority user mode (nice)
id = idle task
wa = I/O waiting - usually waiting for the disk to complete an operation
hi = servicing hardware interrupts
si = servicing software interrupts
st = steal (time given to other DomU instances)
ps
To show all processes including their command-lines: ps aux
lsof
View current open files for all processes: lsof -n
View all open files in /var/log:
lsof +D /var/log
sar
Sar shows system activity history and is very useful for ascertaining what was happening when a cron job ran, since the job cannot be re-run during daytime hours. Show CPU activity history:
sar
Show IO history: sar -b
Show paging history: sar -B
Show load average and queue history sar -q
Show memory usage history: sar -r
free
To show the current free and in-use physical RAM and swapped with low/high values in MB:
free -ml
fuser
Fuser shows what processes are using a given file, socket or filesystem.
Show processes that have locked files in /var: sudo fuser -v /var
iostat
Iostat gives statistics on I/O and CPU usage. iostat without any arguments displays transfers per second since boot as 'tps' although a single logical transfer could be any size and can therefore be misleading. The number of blocks read and written since boot and the blocks read and written per second 'since boot' can also be viewed. iostat -x gives more information, but this again is not very useful as it only shows the total since boot.
Iostat is more useful when it is giving repeating output as each output after the first output shows stats 'since the last output' rather than since boot. iostat 1 5 will give 5 outputs with a 1 second delay. The 2nd - 5th output will look different to the first for this reason.
Get extended IO statistics, updating once per second: iostat 1 -x
The columns that indicate an I/O bottleneck are shown below:
Column     Definition
avgqu-sz   The average queue length of the requests that were issued to the device.
await      The average time spent servicing I/O requests, including the time spent in a queue.
%util      Percentage of CPU time during which I/O requests were issued to the device (device utilisation).
Tcpdump
Tcpdump gives detailed verbose network traffic output. Some examples are shown below, which all refer to the interface eth0. Use ifconfig -a to find all interfaces.
Show HTTP traffic:
tcpdump -i eth0 'tcp port 80'
Show DNS traffic: tcpdump -i eth1 'udp port 53'
Show DNS traffic, and HTTP traffic on ports 80, 81 and 82:
tcpdump -i eth0 'udp port 53 or tcp port (80 or 81 or 82)'
Show traffic on port 80 for 1.1.1.1: tcpdump -i eth0 dst 1.1.1.1 and tcp port 80
Show traffic on port 80 for 1.1.1.1 with an increased sniff size of 1024 bytes:
tcpdump -i eth0 -s 1024 dst 1.1.1.1 and tcp port 80
Send output to a pcap file
The -w parameter captures the output to a file that can be viewed later.
Capture SSH traffic for 2.2.2.2 and output to a pcap file named ssh.pcap:
tcpdump -i eth0 -w ssh.pcap dst 2.2.2.2 and tcp port 22
tcpdump -vvn -i eth0 -w blah.out -s 1500
This output can then be viewed later:
tcpdump -ttttnnr ssh.pcap
strace
strace shows the systems calls for a process or programme. Try strace echo hello. Strace can either invoke a process and monitor system calls end-to-end or attach itself to a process already running. Monitor system calls for pid 12345: strace -p 12345
Ascertain what config files are read on startup
This is useful for ascertaining why a particular config file is not being read from the location you expect, and where it is being read from instead. The -e parameter allows displaying a particular system call only. For example, the SQL query tool tsql refused to read its configuration on a monitoring server when it worked fine on another. The commands below run tsql and capture all the open system calls:
strace -e open tsql -S testconfig -U test -P test
strace -e open,access executable | grep filename.conf
Ascertain what a process is doing
If a particular process is using a lot of CPU or I/O, the log files alone may give no information as to what is causing the problem. The -p parameter described above achieves this.
Ascertain what system calls are being used the most
Statistics on what system calls are being used can be shown with the -c parameter. This can more easily show why a process is using high CPU or I/O by showing which system calls are being used the most.
Show system calls made by the ls /usr command
[user@host /]$ strace -c ls /usr >/dev/null
% time     seconds  usecs/call     calls    errors syscall
 55.00    0.000055           3        17           read
 45.00    0.000045           2        19           fstat64
  0.00    0.000000           0         1           write
  0.00    0.000000           0        19           open
  0.00    0.000000           0        25           close
  0.00    0.000000           0         1           execve
  0.00    0.000000           0         2         1 access
  0.00    0.000000           0         3           brk
  0.00    0.000000           0         3         3 ioctl
  0.00    0.000000           0         8           munmap
  0.00    0.000000           0         1           uname
  0.00    0.000000           0         6           mprotect
  0.00    0.000000           0         2           readv
  0.00    0.000000           0         2           rt_sigaction
  0.00    0.000000           0         1           rt_sigprocmask
  0.00    0.000000           0         1           getrlimit
  0.00    0.000000           0        33           mmap2
  0.00    0.000000           0         1           stat64
  0.00    0.000000           0         2           getdents64
  0.00    0.000000           0        13           fcntl64
  0.00    0.000000           0         1           futex
  0.00    0.000000           0         1           set_thread_area
  0.00    0.000000           0         1           set_tid_address
  0.00    0.000000           0         1           set_robust_list
  0.00    0.000000           0         5           socket
  0.00    0.000000           0         5         4 connect
  0.00    0.000000           0         1           sendmsg
------ ----------- ----------- --------- --------- ----------------
100.00    0.000100                   175         8 total
Ascertain why a process cannot connect to a server
tcpdump can be used instead, although strace will show information on a specific process and thereby avoid most of the 'chatter.' Below is an example of curl connecting to ben.goodacre.name without any problems:
 1  [user@host /]$ strace -e poll,select,connect,recvfrom,sendto curl ben.goodacre.name >/dev/null
 2  connect(3, {sa_family=AF_FILE, path="/var/run/setrans/.setrans-unix"...}, 110) = 0
 3  connect(3, {sa_family=AF_FILE, path="/var/run/nscd/socket"...}, 110) = -1 ENOENT (No such file or directory)
 4  connect(3, {sa_family=AF_FILE, path="/var/run/nscd/socket"...}, 110) = -1 ENOENT (No such file or directory)
 5  connect(3, {sa_family=AF_INET, sin_port=htons(53), sin_addr=inet_addr("127.0.0.1")}, 28) = 0
 6  poll([{fd=3, events=POLLOUT}], 1, 0) = 1 ([{fd=3, revents=POLLOUT}])
 7  poll([{fd=3, events=POLLIN}], 1, 5000) = 1 ([{fd=3, revents=POLLIN}])
 8  recvfrom(3, "\223\203\201\200\0\1\0\1\0\1\0\0\3ben\10goodacre\4name\0\0"..., 1024, 0, {sa_family=AF_INET, sin_port=htons(53), sin_addr=inet_addr("127.0.0.1")}, [16]) = 120
 9  connect(3, {sa_family=AF_INET, sin_port=htons(53), sin_addr=inet_addr("127.0.0.1")}, 28) = 0
10  poll([{fd=3, events=POLLOUT}], 1, 0) = 1 ([{fd=3, revents=POLLOUT}])
11  poll([{fd=3, events=POLLIN}], 1, 5000) = 1 ([{fd=3, revents=POLLIN}])
12  recvfrom(3, "\f\177\201\200\0\1\0\2\0\r\0\f\3ben\10goodacre\4name\0\0"..., 1024, 0, {sa_family=AF_INET, sin_port=htons(53), sin_addr=inet_addr("127.0.0.1")}, [16]) = 504
13  connect(3, {sa_family=AF_INET, sin_port=htons(80), sin_addr=inet_addr("67.223.225.228")}, 16) = -1 EINPROGRESS (Operation now in progress)
14  poll([{fd=3, events=POLLOUT}], 1, 300000) = 1 ([{fd=3, revents=POLLOUT}])
15  poll([{fd=3, events=POLLIN}], 1, 1000) = 1 ([{fd=3, revents=POLLIN}])
16  poll([{fd=3, events=POLLIN}], 1, 0) = 1 ([{fd=3, revents=POLLIN}])
17    % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
18                                   Dload  Upload   Total   Spent    Left  Speed
19  100   701  100   701    0     0   1609      0 --:--:-- --:--:-- --:--:--     0
Lines 3 and 4 show curl connecting to the NSCD - Name Service Cache Daemon - which is used for LDAP lookups among other things. The connect fails, so it moves on to DNS on lines 5 and 9, where it connects to port 53, which is shown by sin_port=htons(53). It connects to localhost - sin_addr=inet_addr("127.0.0.1") - as a DNS server listens on localhost and is configured in /etc/resolv.conf.
The DNS connection is done twice as ben.goodacre.name is a CNAME, so a second DNS lookup is required. Line 8 is where the CNAME is received and line 12 is where the IP address is returned.
The curl client is seen connecting to the IP 67.223.225.228 of this website on line 13. EINPROGRESS shows the connection has been attempted but not that it has succeeded.
The complete POLLOUT line shows a successful connect, on line 14.
Try strace -e poll,select,connect,recvfrom,sendto curl ben.goodacre.name:111 >/dev/null to see what the POLLOUT line looks like when curl cannot connect. Port 111 is not open.
Symptom
  1. When using any command or application (such as ssh or scp) that writes to /dev/null, the following error can occur:
Couldn't open /dev/null
  2. When attempting to ssh, the following error is given, even when SSHing to localhost:
Host key verification failed.
When running ssh -vvv user@localhost the exact debug line relating to this issue is: debug1: read_passphrase: can't open /dev/tty: Permission denied.
Cause
The permissions on the /dev/null (and /dev/tty) device/special files do not allow writing to them.
Resolution
Alter the permissions using chmod, as root:
chmod a+rw /dev/null /dev/tty
Set up an SSH tunnel on localhost, taking connections on port 3335 and forwarding them to port 3306 on a remote machine db02:
ssh -N localhost -L 3335:db02:3306 -f
scp cannot be used to append to a remote file, only replace it.
The line below will allow appending of a remote file with a local file over an SSH connection:
cat localfile | ssh user@host "cat >>remotefile"
Symptom
/var/log/messages states that packets are being dropped due to the limit being reached. The default limit is 65536 connections.
Cause
Unless you are using the Linux box as a router, an application is probably misbehaving. The limit exists to ensure that bad applications cannot cause havoc on networks; investigate why the application is doing this first.
The current limit can be viewed by cat-ing one of these files; the exact path can vary based on distro:
/proc/sys/net/netfilter/nf_conntrack_max /proc/sys/net/ipv4/netfilter/ip_conntrack_max
If the file cannot be found try: find /proc -name 'conntrack_max'
Resolution
If the connections are legitimate, the limit can be increased by echoing the number into the conntrack_max file as shown below:
echo 250000 > /proc/sys/net/netfilter/nf_conntrack_max
To make the changes permanent edit the /etc/sysctl.conf file and edit or add a line based on the location of the /proc file in your distro:
net.netfilter.nf_conntrack_max = 250000
To display a count of all network connections by their state:
$ sudo netstat -ant | awk '{print $6}' | sort | uniq -c | sort -n
Regular expressions are used in Perl, Apache rewrite rules, and Vim, among many other places.
Regex                  Meaning
.                      Any single character except a newline
^                      The beginning of the line or string
$                      The end of the line or string
*                      Zero or more of the last character
+                      One or more of the last character
?                      Zero or one of the last character
(ben.)?goodacre.name   ben.goodacre.name or goodacre.name
t.e                    t followed by any single character followed by e; matches the, tre, tle but not te or tale
^f                     f at the beginning of a line
^ftp                   ftp at the beginning of a line
e$                     e at the end of a line
und*                   un followed by zero or more d characters; matches un, und, undd, unddd etc.
.*                     Any string without a newline (. matches anything except a newline and * means zero or more of these)
^$                     A line with nothing in it
[qjk]                  Either q or j or k
[^qjk]                 Neither q nor j nor k
[a-z]                  Anything from a to z inclusive
[^a-z]                 No lower case letters
[a-zA-Z]               Any letter
[a-z]+                 Any non-zero sequence of lower case letters
[^qjk]+                Any non-zero sequence that does not contain q, j or k
jelly|cream            Either jelly or cream
(eg|le)gs              Either eggs or legs
(da)+                  Either da or dada or dadada or...
\n                     A newline
\t                     A tab
\w                     Any alphanumeric (word) character; the same as [a-zA-Z0-9_]
\W                     Any non-word character; the same as [^a-zA-Z0-9_]
\d                     Any digit; the same as [0-9]
\D                     Any non-digit; the same as [^0-9]
\s                     Any whitespace character: space, tab, newline, etc.
\S                     Any non-whitespace character
\b                     A word boundary, outside [] only
\B                     No word boundary
Ref: http://ben.goodacre.name
Add system user
adduser --system --shell /bin/bash --gecos 'TeamCity Build Control' --group --disabled-password --home /opt/teamcity teamcity
Kill a process
ps aux | grep ProcessName |awk '{print $2}'|xargs kill -9
kill -9 `ps -ef | grep splunk | grep -v grep | awk '{print $2;}'`
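The same result with pkill, which avoids grepping ps output (the pattern is taken from the example above):

pkill -9 -f splunk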
Find Dependency
ldd /usr/lib/jvm/jdk1.7.0/jre/lib/amd64/xawt/libmawt.so |grep "not found"
Sed:
./remote_cmd2.sh "sed -i 's/10.33.64.15\tpuppet//g' /etc/hosts"
How much RAM is supported by a Linux machine
sudo dmidecode -t 16

dmidecode 2.11

SMBIOS 2.7 present.
Handle 0x0024, DMI type 16, 23 bytes
Physical Memory Array
        Location: System Board Or Motherboard
        Use: System Memory
        Error Correction Type: Multi-bit ECC
        Maximum Capacity: 32 GB
        Error Information Handle: No Error
        Number Of Devices: 4