Checking Memcached runtime status from the command line (shell or telnet)

The basic command for checking memcached status is stats; its output shows the following information:

STAT pid 22459                             process ID
STAT uptime 1027046                        server uptime, in seconds
STAT time 1273043062                       current UNIX timestamp on the server
STAT version 1.4.4                         server version
STAT pointer_size 64                       OS word size (this server is 64-bit)
STAT rusage_user 0.040000                  cumulative user CPU time of the process
STAT rusage_system 0.260000                cumulative system CPU time of the process
STAT curr_connections 10                   number of currently open connections
STAT total_connections 82                  total number of connections opened since start
STAT connection_structures 13              number of connection structures allocated by the server
STAT cmd_get 54                            total number of get commands executed
STAT cmd_set 34                            total number of set commands executed
STAT cmd_flush 3                           total number of flush_all commands executed
STAT get_hits 9                            number of get hits
STAT get_misses 45                         number of get misses
STAT delete_misses 5                       number of delete misses
STAT delete_hits 1                         number of delete hits
STAT incr_misses 0                         number of incr misses
STAT incr_hits 0                           number of incr hits
STAT decr_misses 0                         number of decr misses
STAT decr_hits 0                           number of decr hits
STAT cas_misses 0                          number of cas misses
STAT cas_hits 0                            number of cas hits
STAT cas_badval 0                          number of cas operations that failed because the supplied value did not match
STAT auth_cmds 0                           number of authentication commands handled
STAT auth_errors 0                         number of failed authentications
STAT bytes_read 15785                      total bytes read from the network
STAT bytes_written 15222                   total bytes written to the network
STAT limit_maxbytes 1048576                memory limit allocated for storage (bytes)
STAT accepting_conns 1                     whether the server is currently accepting connections (1 = yes)
STAT listen_disabled_num 0                 number of times the server stopped accepting connections (connection limit reached)
STAT threads 4                             number of worker threads
STAT conn_yields 0                         number of times a connection yielded its turn
STAT bytes 0                               bytes currently used to store items
STAT curr_items 0                          number of items currently stored
STAT total_items 34                        total number of items stored since start
STAT evictions 0                           number of items evicted to free space
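Several of these counters are most useful in combination; for instance, the get hit ratio follows from get_hits and cmd_get. A minimal sketch of computing it with the same printf/nc approach used later in this post (assumes nc and awk are available):

# Compute the get hit ratio (get_hits / cmd_get) from the stats output
printf "stats\r\n" | nc 127.0.0.1 11211 | tr -d '\r' | awk '
    /STAT get_hits/ {hits = $3}
    /STAT cmd_get/  {gets = $3}
    END {if (gets > 0) printf "get hit ratio: %.2f%%\n", hits * 100 / gets}'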

 

stats items

Prints item information for each slab.

stats slabs

Prints more detailed information about the slabs and their items.

stats sizes

Prints the size and count of all items.

 

stats cachedump <slab_id> <limit_num>

 
Prints the items stored in the given <slab_id>. <limit_num> is the number of items to output; when <limit_num> is 0, all items are output.
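stats items first tells you which slab ids exist; stats cachedump then lets you peek at the keys stored in one of them. A minimal sketch (slab id 1 and a limit of 10 are example values); each output line has the form ITEM <key> [<size> b; <expiry> s]:

# List up to 10 keys stored in slab 1
printf "stats cachedump 1 10\r\n" | nc 127.0.0.1 11211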

Operating Memcached with shell commands

1. Store data (assuming key g and value 12345)

   

 printf "set g 0 0 5\r\n12345\r\n"|nc 127.0.0.1 11211

    STORED

    2. Retrieve data (assuming key g)

 

printf "get g\r\n"|nc 127.0.0.1 11211

    VALUE g 0 5
     12345
     END

    3. Increment the value by 1 (assuming key g holds a positive integer)

   

 printf "incr g 1\r\n" | nc 127.0.0.1 11211

    12346

    4. Decrement the value by 3 (assuming key g holds a positive integer)

 

 printf "decr g  3\r\n" | nc 127.0.0.1 11211

    12343

    5. Delete data (assuming key g)

   

 printf "delete g\r\n" | nc 127.0.0.1 11211

    DELETED

    6. View Memcached status

 

 printf "stats\r\n" | nc 127.0.0.1 11211

  STAT pid 3025
 STAT uptime 4120500
 STAT time 1228021767
 STAT version 1.2.6
 STAT pointer_size 32
 STAT rusage_user 433.463103
 STAT rusage_system 1224.515845
 STAT curr_items 1132460
 STAT total_items 8980260
 STAT bytes 1895325386
 STAT curr_connections 252
 STAT total_connections 547850
 STAT connection_structures 1189
 STAT cmd_get 13619685
 STAT cmd_set 8980260
 STAT get_hits 6851607
 STAT get_misses 6768078
 STAT evictions 0
 STAT bytes_read 160396238246
 STAT bytes_written 260080686529
 STAT limit_maxbytes 2147483648
 STAT threads 1
 END

  7. Simulate a top-like view of Memcached status:

 

 watch "printf 'stats\r\n' | nc 127.0.0.1 11211"
  
  or
  
   watch "echo stats | nc 127.0.0.1 11211"

echo stats items | nc 127.0.0.1 11211
STAT items:1:number 998                   Slab Id=1; number of items: 998 (i.e. 998 keys stored in this slab)
STAT items:1:age 604348                   Slab Id=1; age of the oldest item in the slab, in seconds
STAT items:1:evicted 0                    Slab Id=1; number of items evicted
STAT items:1:evicted_nonzero 0
STAT items:1:evicted_time 0
STAT items:1:outofmemory 0
STAT items:1:tailrepairs 0
STAT items:1:reclaimed 0
STAT items:6:number 91897                 Slab Id=6; number of items: 91897 (i.e. 91897 keys stored in this slab)
STAT items:6:age 604345                   Slab Id=6; age of the oldest item in the slab, in seconds
STAT items:6:evicted 0                    Slab Id=6; number of items evicted
STAT items:6:evicted_nonzero 0
STAT items:6:evicted_time 0
STAT items:6:outofmemory 0
STAT items:6:tailrepairs 0
STAT items:6:reclaimed 0
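When many slabs are in use it helps to boil this output down to one line per slab, for example total items and evictions per slab id. A minimal sketch:

# Summarise items and evictions per slab from "stats items"
printf "stats items\r\n" | nc 127.0.0.1 11211 | tr -d '\r' | awk -F'[: ]' '
    $4 == "number"  {items[$3] = $5}
    $4 == "evicted" {evicted[$3] = $5}
    END {for (id in items) printf "slab %s: %s items, %s evicted\n", id, items[id], evicted[id]}'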

    Memcached setup on CentOS 5

     

    (3) Configure memcached

    vim /etc/sysconfig/memcached
    PORT="11211"
    USER="memcached"
    MAXCONN="1024"
    CACHESIZE="512"
    OPTIONS=""

    (4) Start memcached

    # Set Memcached to start automatically on boot
    chkconfig memcached on
    # Start Memcached
    /etc/init.d/memcached start
    ## OR ##
    service memcached start

    (5) Check that memcached is working properly.

    yum install nc

     

    nc = netcat

    echo stats | nc localhost 11211
    STAT pid 7599
    STAT uptime 10
    STAT time 1265288542
    STAT version 1.4.4
    STAT pointer_size 32
    STAT rusage_user 0.003999
    STAT rusage_system 0.052991
    STAT curr_connections 10
    STAT total_connections 11
    STAT connection_structures 11
    STAT cmd_get 0
    STAT cmd_set 0
    STAT cmd_flush 0
    STAT get_hits 0
    STAT get_misses 0
    STAT delete_misses 0
    STAT delete_hits 0
    STAT incr_misses 0
    STAT incr_hits 0
    STAT decr_misses 0
    STAT decr_hits 0
    STAT cas_misses 0
    STAT cas_hits 0
    STAT cas_badval 0
    STAT auth_cmds 0
    STAT auth_errors 0
    STAT bytes_read 6
    STAT bytes_written 0
    STAT limit_maxbytes 536870912
    STAT accepting_conns 1
    STAT listen_disabled_num 0
    STAT threads 4
    STAT conn_yields 0
    STAT bytes 0
    STAT curr_items 0
    STAT total_items 0
    STAT evictions 0
    END
    
    # Try to get some value
    echo get some_value | nc localhost 11211
    END
    
    # Not found, but check the stats again
    echo stats | nc localhost 11211
    STAT pid 7599
    STAT uptime 10
    STAT time 1265288542
    STAT version 1.4.4
    [...]
    STAT cmd_get 1
    STAT cmd_set 0
    STAT cmd_flush 0
    STAT get_hits 0
    STAT get_misses 1
    STAT delete_misses 0
    [...]
    STAT evictions 0
    END
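    To confirm the instance actually stores data (and to watch cmd_set and get_hits move), do a quick set/get round trip with a throwaway key; a minimal sketch:

    # Store a 5-byte value under the key test, read it back, then re-check the counters
    printf 'set test 0 0 5\r\nhello\r\n' | nc localhost 11211
    printf 'get test\r\n' | nc localhost 11211
    echo stats | nc localhost 11211 | grep -E 'cmd_set|get_hits'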

     

      Excel: incrementing letters (A, B, ..., Z, AA, AB, ...)

       

      Enter the formula in A1 and fill down:
      =IF(CEILING(ROW()/511758,1)-1=0,"",CHAR(MOD(CEILING(ROW()/511758,1)-2,26)+65))&IF(CEILING(ROW()/18954,1)-1=0,"",CHAR(MOD(CEILING(ROW()/18954,1)-2,26)+65))&IF(CEILING(ROW()/702,1)-1=0,"",CHAR(MOD(CEILING(ROW()/702,1)-2,26)+65))&IF(CEILING(ROW()/26,1)-1=0,"",CHAR(MOD(CEILING(ROW()/26,1)-2,26)+65))&CHAR(MOD((ROW()-1),26)+65)
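      Outside Excel, the same sequence (A, B, ..., Z, AA, AB, ...) can be produced by converting a 1-based number into a bijective base-26 label. A minimal shell sketch (num2col is a made-up helper name):

      # num2col: convert 1, 2, 3, ... into A, B, ..., Z, AA, AB, ...
      num2col() {
          local n=$1 col=""
          while [ "$n" -gt 0 ]; do
              local r=$(( (n - 1) % 26 ))
              col=$(printf "\\$(printf '%03o' $((65 + r)))")$col
              n=$(( (n - 1) / 26 ))
          done
          echo "$col"
      }

      # usage: print the first 30 labels
      for i in $(seq 1 30); do num2col "$i"; done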

       

        Fixing an AMD APP SDK Runtime installation failure

        I couldn't resist updating as soon as a new Catalyst driver came out. Every other component installed fine, but the AMD APP SDK Runtime reported an installation failure. Searching online turned up no working fix, so I guessed the problem was a conflict with a previously installed version, which suggested a solution: first install Microsoft's Windows Installer CleanUp utility, run it, locate the previously installed AMD APP SDK Runtime and click Remove. Reinstalling then works.

          Resetting an expired vCenter Single Sign On administrator password on Windows

           

          The default Single Sign On password policy specifies that passwords expire after one year. The vSphere Web Client gives no warning when a password is about to expire. If you cannot log in to the vSphere Web Client because the Single Sign On administrator password has expired, a user with Single Sign On administrator privileges must reset it.

          If the vCenter Single Sign On administrator user's own password has expired, that administrator cannot log in to the vSphere Web Client to change it.


          To change the password in the vSphere Web Client:


          a. Log in to the vSphere Web Client as a user with Single Sign On administrator privileges.

          b. Navigate to Administration > Access > SSO Users and Groups, then click the Users tab.

          c. Right-click the user and select Edit User.

          d. Enter the new password and confirm it.

          e. Click OK.


          To change the password from the command line:


          a. Open a terminal window and navigate to C:\Program Files\VMware\Infrastructure\SSOServer\ssolscli.

          b. Run the following command:

          ssopass username

          c. Enter the user's current password, even if it has expired.

          d. Enter the new password, then enter it again to confirm.

          The administrator password is now reset, and the user can log in to the vSphere Web Client with the new credentials.

           

            Expanding a Linux filesystem in VMware

            • February 28, 2013
            • Published in vmware

             

            Sometimes you run into this situation: you give a VMware virtual machine a virtual disk, install Linux on it, and as the machine sees more and more use the disk gradually becomes too small and needs to be enlarged. The following explains how to grow a Linux filesystem inside VMware.

            My VMware guest runs Fedora 9, but other Linux distributions should be much the same.

            First enlarge the virtual disk in VMware (in "Virtual Machine Settings", on the Hardware tab, select "Hard Disk (SCSI)" and choose Expand under Utilities on the right). You could also simply Add a new virtual disk instead.

            Boot Linux and type df -h to check disk usage:

            [root@localhost ~]# df -h
            Filesystem            Size  Used Avail Use% Mounted on
            /dev/mapper/VolGroup00-LogVol00
                                   29G   26G  2.3G  92% /
            /dev/sda1             190M   13M  168M   7% /boot
            tmpfs                 506M   48K  506M   1% /dev/shm
            gvfs-fuse-daemon       29G   26G  2.3G  92% /root/.gvfs

            You can see that the extra disk capacity is not yet visible in Linux. Use fdisk -l to check the partition table. Mine shows:

            [root@localhost ~]# fdisk -l

            Disk /dev/sda: 42.9 GB, 42949672960 bytes
            255 heads, 63 sectors/track, 5221 cylinders
            Units = cylinders of 16065 * 512 = 8225280 bytes
            Disk identifier: 0x000f1526

               Device Boot      Start         End      Blocks   Id  System
            /dev/sda1   *           1          25      200781   83  Linux
            /dev/sda2              26        3916    31254457+  8e  Linux LVM

            Note that Disk /dev/sda now has over 40 GB (just expanded through VMware), but /dev/sda1 and /dev/sda2 together account for only a bit over 30 GB, so roughly 10 GB is unused.

            Start by adjusting the partition table with fdisk. The process is interactive; comments mark the places where input is required.

            [root@localhost ~]# fdisk /dev/sda

            The number of cylinders for this disk is set to 5221.
            There is nothing wrong with that, but this is larger than 1024,
            and could in certain setups cause problems with:
            1) software that runs at boot time (e.g., old versions of LILO)
            2) booting and partitioning software from other OSs
               (e.g., DOS FDISK, OS/2 FDISK)

            Command (m for help): n       // n = create a new partition
            Command action
               e   extended
               p   primary partition (1-4)
            e                             // e = create an extended partition
            Partition number (1-4): 3     // 3, because 1 and 2 are taken (/dev/sda1 and /dev/sda2)
            First cylinder (3917-5221, default 3917):                               // starting cylinder; press Enter to accept the default
            Using default value 3917
            Last cylinder or +size or +sizeM or +sizeK (3917-5221, default 5221):   // partition size; press Enter to accept the default (all remaining space)
            Using default value 5221

            Command (m for help): n       // create a logical partition inside the extended partition
            Command action
               l   logical (5 or over)
               p   primary partition (1-4)
            l                             // l = create a logical partition
            First cylinder (3917-5221, default 3917):                               // press Enter
            Using default value 3917
            Last cylinder or +size or +sizeM or +sizeK (3917-5221, default 5221):   // press Enter
            Using default value 5221

            Command (m for help): p       // p = print the new partition table; the new partitions are there

            Disk /dev/sda: 42.9 GB, 42949672960 bytes
            255 heads, 63 sectors/track, 5221 cylinders
            Units = cylinders of 16065 * 512 = 8225280 bytes
            Disk identifier: 0x000f1526

               Device Boot      Start         End      Blocks   Id  System
            /dev/sda1   *           1          25      200781   83  Linux
            /dev/sda2              26        3916    31254457+  8e  Linux LVM
            /dev/sda3            3917        5221    10482412+   5  Extended
            /dev/sda5            3917        5221    10482381   83  Linux

            Command (m for help): w       // w = write the table and exit
            The partition table has been altered!

            Calling ioctl() to re-read partition table.

            WARNING: Re-reading the partition table failed with error 16: Device or resource busy.
            The kernel still uses the old table.
            The new table will be used at the next reboot.
            Syncing disks.

            The new extended partition /dev/sda3 and logical partition /dev/sda5 are now there. If you added a brand-new disk instead, you could simply create a primary partition on it, which is even simpler.

            Run partprobe, which makes the Linux kernel re-read the partition table after it has changed:

            [root@localhost ~]# partprobe

            Format the new /dev/sda5 partition as ext3:

            [root@localhost ~]# mkfs -t ext3 /dev/sda5
            mke2fs 1.40.8 (13-Mar-2008)
            Warning: 256-byte inodes not usable on older systems
            Filesystem label=
            OS type: Linux
            Block size=4096 (log=2)
            Fragment size=4096 (log=2)
            655360 inodes, 2620595 blocks
            131029 blocks (5.00%) reserved for the super user
            First data block=0
            Maximum filesystem blocks=2684354560
            80 block groups
            32768 blocks per group, 32768 fragments per group
            8192 inodes per group
            Superblock backups stored on blocks:
                32768, 98304, 163840, 229376, 294912, 819200, 884736, 1605632

            Writing inode tables: done
            Creating journal (32768 blocks): done
            Writing superblocks and filesystem accounting information: done

            This filesystem will be automatically checked every 32 mounts or
            180 days, whichever comes first.  Use tune2fs -c or -i to override.

            After formatting, add the new partition to LVM (Logical Volume Manager) so it can be managed from there.
            First check the existing LVM physical volumes with pvdisplay:

            [root@localhost ~]# pvdisplay
              --- Physical volume ---
              PV Name               /dev/sda2
              VG Name               VolGroup00
              PV Size               29.81 GB / not usable 25.93 MB
              Allocatable           yes
              PE Size (KByte)       32768
              Total PE              953
              Free PE               1
              Allocated PE          952
              PV UUID               gjFDfY-J0vK-7YKm-uo32-eiQZ-YO3D-PFbp1A

            As you can see, only /dev/sda2 belongs to the VolGroup00 volume group.

            Create an LVM physical volume on the new /dev/sda5 with pvcreate:

            [root@localhost ~]# pvcreate /dev/sda5
              Physical volume "/dev/sda5" successfully created

            Created successfully; run pvdisplay again to verify:

            [root@localhost ~]# pvdisplay
              --- Physical volume ---
              PV Name               /dev/sda2
              VG Name               VolGroup00
              PV Size               29.81 GB / not usable 25.93 MB
              Allocatable           yes
              PE Size (KByte)       32768
              Total PE              953
              Free PE               1
              Allocated PE          952
              PV UUID               gjFDfY-J0vK-7YKm-uo32-eiQZ-YO3D-PFbp1A

              "/dev/sda5" is a new physical volume of "10.00 GB"
              --- NEW Physical volume ---
              PV Name               /dev/sda5
              VG Name
              PV Size               10.00 GB
              Allocatable           NO
              PE Size (KByte)       0
              Total PE              0
              Free PE               0
              Allocated PE          0
              PV UUID               WiG7f0-jGuq-HCUR-3MCZ-d8V0-rwV9-rEF2wg

            The new physical volume exists but belongs to no volume group yet. Add it to VolGroup00:

            [root@localhost ~]# vgextend VolGroup00 /dev/sda5
              Volume group "VolGroup00" successfully extended

            Added successfully. Type lvdisplay to see the current logical volumes:

            [root@localhost ~]# lvdisplay
              --- Logical volume ---
              LV Name                /dev/VolGroup00/LogVol00
              VG Name                VolGroup00
              LV UUID                nvo0P1-8kmf-f9jJ-X4ii-RHUD-VvwK-AvIm36
              LV Write Access        read/write
              LV Status              available
              # open                 1
              LV Size                29.25 GB
              Current LE             936
              Segments               1
              Allocation             inherit
              Read ahead sectors     auto
              - currently set to     256
              Block device           253:0

              --- Logical volume ---
              LV Name                /dev/VolGroup00/LogVol01
              VG Name                VolGroup00
              LV UUID                2hi7f1-SPdo-FQdX-s4gE-Y74F-rgzQ-KAO55X
              LV Write Access        read/write
              LV Status              available
              # open                 1
              LV Size                512.00 MB
              Current LE             16
              Segments               1
              Allocation             inherit
              Read ahead sectors     auto
              - currently set to     256
              Block device           253:1

            The logical volume to extend is /dev/VolGroup00/LogVol00; use lvextend:

            [root@localhost ~]# lvextend /dev/VolGroup00/LogVol00 /dev/sda5
              Extending logical volume LogVol00 to 39.22 GB
              Logical volume LogVol00 successfully resized

            The extension succeeded. Running lvdisplay again now shows:

            [root@localhost ~]# lvdisplay
              --- Logical volume ---
              LV Name                /dev/VolGroup00/LogVol00
              VG Name                VolGroup00
              LV UUID                nvo0P1-8kmf-f9jJ-X4ii-RHUD-VvwK-AvIm36
              LV Write Access        read/write
              LV Status              available
              # open                 1
              LV Size                39.22 GB
              Current LE             1255
              Segments               2
              Allocation             inherit
              Read ahead sectors     auto
              - currently set to     256
              Block device           253:0

              --- Logical volume ---
              LV Name                /dev/VolGroup00/LogVol01
              VG Name                VolGroup00
              LV UUID                2hi7f1-SPdo-FQdX-s4gE-Y74F-rgzQ-KAO55X
              LV Write Access        read/write
              LV Status              available
              # open                 1
              LV Size                512.00 MB
              Current LE             16
              Segments               1
              Allocation             inherit
              Read ahead sectors     auto
              - currently set to     256
              Block device           253:1

            LogVol00 has grown by 10 GB. But type df -h now and the extra space is still not available:

            [root@localhost ~]# df -h
            Filesystem            Size  Used Avail Use% Mounted on
            /dev/mapper/VolGroup00-LogVol00
                                   29G   26G  2.3G  92% /
            /dev/sda1             190M   13M  168M   7% /boot
            tmpfs                 506M   48K  506M   1% /dev/shm
            gvfs-fuse-daemon       29G   26G  2.3G  92% /root/.gvfs

            The filesystem itself must be grown with resize2fs. With a 2.6 or later kernel you can resize online; on older kernels you have to umount the device first. Fedora 9 runs kernel 2.6.25, so we can directly resize the /dev/mapper/VolGroup00-LogVol00 filesystem listed by df:

            [root@localhost ~]# resize2fs /dev/mapper/VolGroup00-LogVol00
            resize2fs 1.40.8 (13-Mar-2008)
            Filesystem at /dev/mapper/VolGroup00-LogVol00 is mounted on /; on-line resizing required
            old desc_blocks = 2, new_desc_blocks = 3
            Performing an on-line resize of /dev/mapper/VolGroup00-LogVol00 to 10280960 (4k) blocks.
            The filesystem on /dev/mapper/VolGroup00-LogVol00 is now 10280960 blocks long.

            Type df -h once more:

            [root@localhost ~]# df -h
            Filesystem            Size  Used Avail Use% Mounted on
            /dev/mapper/VolGroup00-LogVol00
                                   39G   26G   12G  69% /
            /dev/sda1             190M   13M  168M   7% /boot
            tmpfs                 506M   48K  506M   1% /dev/shm
            gvfs-fuse-daemon       39G   26G   12G  69% /root/.gvfs

            With that, the filesystem expansion is complete.
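            For reference, here is the whole flow condensed into a single sequence of commands; a minimal sketch that assumes the device and volume names used above (/dev/sda5, VolGroup00, LogVol00), so substitute your own names as reported by fdisk -l, pvdisplay and lvdisplay.

            # Condensed version of the steps above (names are from this article's setup)
            partprobe                                     # make the kernel re-read the changed partition table
            pvcreate /dev/sda5                            # turn the new partition into an LVM physical volume
            vgextend VolGroup00 /dev/sda5                 # add it to the existing volume group
            lvextend /dev/VolGroup00/LogVol00 /dev/sda5   # grow the logical volume onto the new PV
            resize2fs /dev/mapper/VolGroup00-LogVol00     # grow the ext3 filesystem online (kernel 2.6+)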

              ESXi: Displaying the ARP and Neighbor Discovery cache for VMkernel network interfaces

               

              To review the ARP and ND cache for the VMkernel network interfaces on an ESX or ESXi host, use the local or remote esxcli command. This command is available at the local console of the ESX/ESXi host and can be used remotely from the vSphere Command Line Interface (vCLI) or from the vSphere Management Assistant (vMA) appliance. For installation and usage instructions, see the relevant documentation.

              1. Open a console session to the location where the esxcli command is available.
              2. Display the list of known network neighbors in the ARP and ND cache for all VMkernel network interfaces using one of the commands:
                • On ESXi 4.1:

                  esxcli [--server hostname --username root --password rootpassword] network neighbor list
                   
                • On ESXi 5.0:

                  esxcli [--server hostname --username root --password rootpassword] network ip neighbor list
                   
                Note: If running locally on the ESX/ESXi host console, no server hostname or authentication is required for the esxcli command.

                The output appears similar to:

                Neighbor     Mac Address       vmknic Expiry(sec)
                --------     -----------       ------ -----------
                10.5.6.7     00:50:56:9a:00:7a vmk0   1200
                10.5.6.8     00:50:56:9a:00:57 vmk0   1197
                10.5.6.9     00:50:56:9a:00:3e vmk0   277
                10.200.1.10  00:50:56:9a:01:09 vmk1   979
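                The listing above covers every VMkernel interface at once; to narrow it to a single interface you can filter on the vmknic column. A minimal sketch (assumes the ESXi 5.0 command and the column layout shown above):

                # Show only neighbors learned on vmk0 (field 3 is the vmknic column above)
                esxcli network ip neighbor list | awk 'NR <= 2 || $3 == "vmk0"'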

                ESXi S.M.A.R.T. health monitoring for hard drives

                In ESXi 5.1, VMware added S.M.A.R.T. functionality to monitor hard drive health. The S.M.A.R.T. feature records various operation parameters from physical hard drives attached to a local controller. The feature is part of the firmware on the circuit board of a physical hard disk (HDD and SSD).


                To read the current data from a disk:



                1. Determine the device parameter to use by running the command:

                  esxcli storage core device list
                   
                  The output lists all SCSI devices seen by the ESXi host, with identifiers such as:

                  t10.ATA_____WDC_WD2502ABYS2D18B7A0________________________WD2DWCAT1H751520

                  Note: External FC/iSCSI LUNs or virtual disks from a RAID controller might not report a S.M.A.R.T. status.
                   
                2. Read the data from the device (a loop over all devices is sketched after this list):

                  esxcli storage core device smart get -d device

                  Where device is an identifier found in step 1.
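                A quick way to pull the S.M.A.R.T. report for every local device in one go is to loop over those identifiers. A minimal sketch (it assumes the identifiers appear as non-indented lines starting with t10., naa., mpx. or eui., which is the usual case but worth verifying on your host):

                # Dump S.M.A.R.T. data for every device the host reports
                for dev in $(esxcli storage core device list | grep -E '^(t10|naa|mpx|eui)\.'); do
                  echo "=== $dev ==="
                  esxcli storage core device smart get -d "$dev"
                done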

                This table breaks down some example output:



                Parameter                     Value  Threshold  Worst
                Health Status                 OK     N/A        N/A
                Media Wearout Indicator       0      0          0
                Write Error Count             N/A    N/A        N/A
                Read Error Count              118    50         118
                Power-on Hours                0      0          0
                Power Cycle Count             100    0          100
                Reallocated Sector Count      100    3          100
                Raw Read Error Rate           118    50         118
                Drive Temperature             27     0          34
                Driver Rated Max Temperature  N/A    N/A        N/A
                Write Sectors TOT Count       N/A    N/A        N/A
                Read Sectors TOT Count        N/A    N/A        N/A
                Initial Bad Block Count       N/A    N/A        N/A

                Note: A physical hard drive can have up to 30 different attributes (the example above shows only 13). For more information, see How does the S.M.A.R.T. function of hard disks work?

                Note: The preceding link was correct as of December 7, 2012. If you find the link is broken, provide feedback and a VMware employee will update the link.


                A raw value can take two possible forms:


                • A number between 0 and 253
                • A word (for example, N/A or OK)

                Column descriptions

                Note: The values returned and their meaning for each of these columns can vary by manufacturer. For more information, please consult your hardware supplier.

                • Parameter

                  This is a translation from the attribute ID to human-readable text. For example:

                  hex 0xE7 = decimal 231 = "Drive Temperature"

                  For more information, see the Known ATA S.M.A.R.T. attributes section of the S.M.A.R.T. Wikipedia article.

                  Note: The preceding link was correct as of December 7, 2012. If you find the link is broken, provide feedback and a VMware employee will update the link.
                   
                • Value

                  This is the raw value reported by the disk. To illustrate a simple Value using the example above, the Drive Temperature is reported as 27, which means 27 degrees Celsius.

                  Value can either be a number (0-253) or a word (for example, N/A or OK).
                   
                • Threshold

                  The (failure) limit for the attribute.
                   
                • Worst

                  The highest Value ever recorded for the parameter.

                smartd daemon

                ESXi 5.1 also ships with the /sbin/smartd daemon. This tool does not take any command-line switches or interact with the console; if you run it from the shell, the S.M.A.R.T. status is reported in the /var/log/syslog.log file.

                For example:

                XXXX-XX-28T10:15:12Z smartd: [warn] t10.ATA_____SanDisk_SDSSDX120GG25___________________120506403552________: below MEDIA WEAROUT threshold (0)
                XXXX-XX-28T10:15:12Z smartd: [warn] t10.ATA_____SanDisk_SDSSDX120GG25___________________120506403552________: above TEMPERATURE threshold (27 > 0)
                XXXX-XX-28T10:15:12Z smartd: [warn] t10.ATA_____WDC_WD2502ABYS2D18B7A0________________________WD2DWCAT1H751520: above TEMPERATURE threshold (113 > 0)


                Notes:


                • You can stop the daemon by typing Ctrl+c.
                • Logged events should be viewed with caution. As can be seen in the example, all three warnings are irrelevant. The output can vary greatly between manufacturers and disk models.
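                  To review what smartd has logged without watching the console, you can filter the syslog file directly; a minimal sketch:

                  # Show only the smartd entries in the ESXi syslog
                  grep smartd /var/log/syslog.log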