12月 17

CentOS上使用python和C开发leveldb库学习测试

  测试环境为阿里云,单核Intel(R) Xeon(R) CPU E5-2420 0 @ 1.90GHz,内存512M,最基础的配置,测试系统为CentOS 64位。测试数据就是把双色球所有组合进行排列,共17721088行,文件名为di.txt,体积363421k。leveldb推荐使用SSD硬盘,当前虚拟硬盘的速度肯定是不行,此处只是学习,测试对比。

官方网址
http://code.google.com/p/py-leveldb/

安装命令
svn checkout http://py-leveldb.googlecode.com/svn/trunk/ py-leveldb-read-only
cd py-leveldb-read-only/
# 需要注意,下面的脚本里需要使用git获取leveldb。所以要把git客户端装好。
./compile_leveldb.sh
python setup.py build
python setup.py install

  python遇到的问题
  报错
../snappy-read-only/snappy.h:45:33: error: snappy-stubs-public.h

  解决:
yum -y install autoconf automake libtool
再次编译还是不成功,手动安装压缩工具。
网址:http://code.google.com/p/snappy/
使用命令
wget http://snappy.googlecode.com/files/snappy-1.1.1.tar.gz
./configure --enable-shared=no --enable-static=yes
make CXXFLAGS='-g -O2 -fPIC'
make install
再次安装编译py-leveldb通过。

  C语言编译问题
  报错
/usr/bin/ld: cannot find -lleveldb
collect2: ld 返回 1

  解决
将.so .a拷贝到系统lib目录中,测试系统64位,直接拷贝到lib64中。
cp libleveldb.* /usr/lib64/

python测试部分
顺序写代码

#!/bin/python
#-*- coding:utf-8 -*-
# Filename:  
# Revision:    
# Date:        2013-12-14
# Author:      simonzhang
# web:         www.simonzhang.net
# Email:       simon-zzm@163.com
### END INIT INFO
import leveldb
import time

db = leveldb.LevelDB('./data')

# multiple put/delete applied atomically, and committed to disk 
#batch = leveldb.WriteBatch()
f = open('di.txt')
num = 0
start = time.time()
for i in f:
    if i[-1] == '\n':
        data =  i[:-1]
    else:
        data = i
    num += 1
    db.Put("%s" % num, data)
end = time.time()
print "use sec %s" % (end-start)

批量写代码

#!/bin/python
#-*- coding:utf-8 -*-
# Filename:  
# Revision:    
# Date:        2013-12-14
# Author:      simonzhang
# web:         www.simonzhang.net
# Email:       simon-zzm@163.com
### END INIT INFO
import leveldb
import time

db = leveldb.LevelDB('./data')

batch = leveldb.WriteBatch()
f = open('di.txt')
num = 0
start = time.time()
for i in f:
    if i[-1] == '\n':
        data =  i[:-1]
    else:
        data = i
    num += 1
    batch.Put("%s" % num, data)
    # 因为内存太小,每5万行写入一次
    if ((num % 50000) == 0) or (num == 17721087):
        db.Write(batch, sync = True)
        batch = leveldb.WriteBatch()
end = time.time()
print "use sec %s" % (end-start)

随机读1000万次代码:

#!/bin/python
#-*- coding:utf-8 -*-
# Filename:  
# Revision:    
# Date:        2013-12-14
# Author:      simonzhang
# web:         www.simonzhang.net
# Email:       simon-zzm@163.com
### END INIT INFO
import leveldb
from random import randint
import time

db = leveldb.LevelDB('./data')
start = time.time()
for i in xrange(10000000):
    num = randint(1, 10000000)
    try:
        v =  db.Get("%s" % num)
        print v
    except:
        pass
end = time.time()
print "use sec %s" % (end-start)

测试结果
# python write_seq.py
use sec 329.217786074
每秒写入53827

# python write_bacth.py
use sec 173.626176119
每秒写入102064

# python read.py
use sec 288.070755005
每秒随机读取34713

C部分代码,为了方便,我把两个代码分开写。
C 1000万次随机读

// Filename: 
// Revision:   
// Date:        2013-12-14
// Author:      simonzhang
// web:         www.simonzhang.net
// Email:       simon-zzm@163.com
// END INIT INFO
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "leveldb/c.h"

/*
 * Format `value` as a decimal string into `string`.
 * The third parameter is named `radix` but is used as the buffer size
 * throughout this program (both call sites pass the array length).
 * Returns `string` on success, NULL on bad arguments or on a value
 * whose decimal form cannot fit in the buffer.
 *
 * BUG FIX: the original wrote string[rt]='\0' even when rt == radix,
 * i.e. one byte past the end of the buffer (triggered by 8-digit keys
 * in an s[8] buffer).  snprintf already NUL-terminates, so the extra
 * write was both redundant and out of bounds.
 */
char *itoa(int value, char *string, int radix)
{
    int rt;
    if (string == NULL)
        return NULL;
    if (radix <= 0 || radix > 30)
        return NULL;
    rt = snprintf(string, (size_t)radix, "%d", value);
    /* rt is the length the full number needs (excluding the NUL). */
    if (rt < 0 || rt > radix)
        return NULL;
    return string;
}

/*
 * Random-read benchmark: 10 million leveldb_get() calls with keys drawn
 * from [X, Y], timing the whole loop with second resolution.
 */
int main()
{
    leveldb_t *db;
    leveldb_options_t *options;
    leveldb_readoptions_t *roptions;
    char *err = NULL;
    char *read;
    size_t read_len;
    time_t start_time, end_time;

    /* The read benchmark never reads di.txt; keep the original sanity
     * check that the source data file exists, but close the handle
     * (the original leaked it). */
    FILE *di;
    if ((di = fopen("di.txt", "r")) == NULL)
    {
        printf("can't open!\n");
        return -1;
    }
    fclose(di);

    /* Open the database with snappy compression, creating it if absent. */
    options = leveldb_options_create();
    leveldb_options_set_compression(options, leveldb_snappy_compression);
    leveldb_options_set_create_if_missing(options, 1);
    db = leveldb_open(options, "data", &err);
    if (err != NULL) {
        fprintf(stderr, "Open fail.\n");
        return 1;
    }
    leveldb_free(err);
    err = NULL;

    roptions = leveldb_readoptions_create();
    start_time = time(NULL);
    int X = 99, Y = 15000000;   /* key range: X = first, Y = last */
    int _rand;
    /* BUG FIX: was s[8] with itoa(_rand, s, 8) -- keys >= 10000000 need
     * 8 digits plus the NUL, overflowing the buffer.  16 bytes covers
     * any int. */
    char s[16];
    srand((unsigned)time(NULL));
    for (int i = 0; i < 10000000; i++)
    {
        _rand = rand() % (Y - X + 1) + X;
        itoa(_rand, s, (int)sizeof(s));
        read = leveldb_get(db, roptions, s, strlen(s), &read_len, &err);
        if (err != NULL) {
            fprintf(stderr, "Read fail.\n");
            return 1;
        }
        leveldb_free(err);  /* err is NULL on success; call is harmless */
        free(read);         /* NULL for a missing key; free(NULL) is OK */
    }
    /* Release everything; the original never destroyed the option objects. */
    leveldb_readoptions_destroy(roptions);
    leveldb_close(db);
    leveldb_options_destroy(options);
    end_time = time(NULL);
    printf("%ld\n", end_time - start_time);
    return 0;
}

C顺序写入

// Filename: 
// Revision:   
// Date:        2013-12-14
// Author:      simonzhang
// web:         www.simonzhang.net
// Email:       simon-zzm@163.com
// END INIT INFO
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "leveldb/c.h"

/*
 * Format `value` as a decimal string into `string`.
 * The third parameter is named `radix` but is used as the buffer size
 * throughout this program (the call site passes the array length).
 * Returns `string` on success, NULL on bad arguments or on a value
 * whose decimal form cannot fit in the buffer.
 *
 * BUG FIX: the original wrote string[rt]='\0' even when rt == radix,
 * i.e. one byte past the end of the buffer.  snprintf already
 * NUL-terminates, so the extra write was both redundant and out of
 * bounds.
 */
char *itoa(int value, char *string, int radix)
{
    int rt;
    if (string == NULL)
        return NULL;
    if (radix <= 0 || radix > 30)
        return NULL;
    rt = snprintf(string, (size_t)radix, "%d", value);
    /* rt is the length the full number needs (excluding the NUL). */
    if (rt < 0 || rt > radix)
        return NULL;
    return string;
}

/*
 * Sequential-write benchmark: read di.txt line by line and store each
 * line in leveldb keyed by its 0-based line number, timing the whole
 * loop with second resolution.
 */
int main()
{
    leveldb_t *db;
    leveldb_options_t *options;
    leveldb_writeoptions_t *woptions;
    char *err = NULL;
    time_t start_time, end_time;

    /* Source data: one record per line. */
    FILE *di;
    if ((di = fopen("di.txt", "r")) == NULL)
    {
        printf("can't open!\n");
        return -1;
    }

    /* Open the database with snappy compression, creating it if absent. */
    options = leveldb_options_create();
    leveldb_options_set_compression(options, leveldb_snappy_compression);
    leveldb_options_set_create_if_missing(options, 1);
    db = leveldb_open(options, "data", &err);
    if (err != NULL) {
        fprintf(stderr, "Open fail.\n");
        return 1;
    }
    leveldb_free(err);
    err = NULL;

    woptions = leveldb_writeoptions_create();
    start_time = time(NULL);

    /* BUG FIX: `ch` must be int, not char -- fgetc() returns EOF as an
     * int outside the unsigned-char range; comparing a char against EOF
     * either never terminates or stops early on byte 0xFF. */
    int ch;
    char str[30];   /* accumulates the current line */
    char s[16];     /* key buffer; large enough for any int */
    int num = 0, c = 0;
    /* BUG FIX: str was uninitialized before the first line, so the
     * first strlen(str)/leveldb_put read garbage. */
    memset(str, '\0', sizeof(str));
    while ((ch = fgetc(di)) != EOF)
    {
        if (ch == '\n')
        {
            itoa(num, s, (int)sizeof(s));
            leveldb_put(db, woptions, s, strlen(s), str, strlen(str), &err);
            if (err != NULL) {
                fprintf(stderr, "Write fail.\n");
                return 1;
            }
            memset(str, '\0', sizeof(str));
            c = 0;
            num += 1;
        }
        else if (c < (int)sizeof(str) - 1)
        {
            /* keep one byte for the NUL; bytes beyond the buffer are
             * dropped (the original wrote past str[29] on long lines) */
            str[c] = (char)ch;
            c += 1;
        }
    }
    /* BUG FIX: a final line without a trailing newline was silently lost. */
    if (c > 0)
    {
        itoa(num, s, (int)sizeof(s));
        leveldb_put(db, woptions, s, strlen(s), str, strlen(str), &err);
        if (err != NULL) {
            fprintf(stderr, "Write fail.\n");
            return 1;
        }
    }
    fclose(di);
    /* Release everything; the original never destroyed the option objects. */
    leveldb_writeoptions_destroy(woptions);
    leveldb_close(db);
    leveldb_options_destroy(options);
    end_time = time(NULL);
    printf("%ld\n", end_time - start_time);
    return 0;
}

测试
C顺序写入
编译
gcc -Wall -std=c99 write-leveldb.c -lleveldb -O3 -o write-leveldb
结果
# ./write-leveldb
225
每秒钟处理78760

C 1000万次随机读
编译
gcc -Wall -std=c99 read-leveldb.c -lleveldb -O3 -o read-leveldb
结果
# ./read-leveldb
143
每秒处理69930

  写入过程CPU肯定是全部跑满。使用snappy压缩,所以写入data目录为175M,压缩了一半。
  随机读将CPU跑满。python内存占用23%。C语言占用内存最终增加到39%。
  之后又做到了一个测试,硬件内存只有512M,硬盘数据插入826M。使用python代码再次随机读取1000万次,使用347.94秒,每秒随机读28740。所以数据超出物理内存不会出错只是速度下降。
  还有一个问题没有测试,leveldb默认的每块2M,如果64G则数据文件65536个,达到系统打开文件最大数,不知道会不会出问题。并且在同一个目录下文件过多也会对系统造成一定压力,不知道是否会有影响。推荐的办法是把单块体积加大,此效率也没有测试。
  还有一点说明,使用pypy做了测试,效果不如python,具体原因没有详查。

12月 04

python和C统计jpeg图片RGB值的效率

  测试硬件pcduino(arm),操作系统ubuntu 3.4.29+ armv7l GNU/Linux。测试文件,图片jpeg,大约1.3M。测试内容统计图片的RGB里的值,存在数据库里将来看看能不能做搜索用。
  先是用python,开发速度非常快,但是效率有点问题,所以有用C做了一下。以下是代码和两次的测试结果。C使用的jpeglib库8.0版本。

python代码

#!/bin/python
#-*- coding:utf-8 -*-
# Filename:   
# Revision:    1.0
# Date:        2013-12-04
# Author:      simonzhang
# web:         www.simonzhang.net
# Email:       simon-zzm@163.com
### END INIT INFO
from PIL import Image
# 对RGB简历字典
R = {}
G = {}
B = {}
# 初始化值
for i in xrange(256):
    R[i] = 0
    G[i] = 0
    B[i] = 0
# 大图片需要处理很长时间,所以要用python可以按比例将图缩小
a = Image.open('123.jpg')
#a = a.resize((int(a.size[0] * 0.1), int(a.size[1] * 0.1)), Image.ANTIALIAS)
x = a.size[0]
y = a.size[1]

# 循环处理每个像素点的RGB值
for lx in xrange(x):
    for ly in xrange(y):
        rgb = a.getpixel((lx, ly))
        R[rgb[0]] += 1
        G[rgb[1]] += 1
        B[rgb[2]] += 1
# 打印最终结果
print sorted(R.items(), key=lambda R:R[1],reverse=True)[0]
print sorted(G.items(), key=lambda G:G[1],reverse=True)[0]
print sorted(B.items(), key=lambda B:B[1],reverse=True)[0] 

C代码

/*
*Filename:   
* Revision:    1.0
* Date:        2013-12-04
* Author:      simonzhang
* web:         www.simonzhang.net
* Email:       simon-zzm@163.com
*/
#include 
#include 
#include 

/* Index of the (last) largest element found by sort_values(). */
int max_num;

/*
 * Return the largest value in arr[0..254] and record its index in the
 * global max_num.  On ties the highest index wins (preserving the `>=`
 * comparison of the original loop).
 *
 * BUG FIX: the original initialized max_num to 0 and only updated it
 * inside the loop starting at index 2, so when the maximum sat at
 * index 1 max_num was reported as 0.  The unused "second largest"
 * tracking has been dropped.
 *
 * NOTE(review): only 255 slots are scanned although a colour channel
 * has 256 possible values (0..255); the callers also declare 255-slot
 * arrays, so the bound is kept for compatibility -- value 255 is never
 * counted.  TODO: widen to 256 together with the callers.
 */
int sort_values(int arr[255])
{
    int best, i;
    max_num = 0;
    best = arr[0];
    for (i = 1; i < 255; i++)
    {
        if (arr[i] >= best)
        {
            best = arr[i];
            max_num = i;
        }
    }
    return best;
}


read_jpeg_file(char *filename)
{
    /* these are standard libjpeg structures for reading(decompression) */
    struct jpeg_decompress_struct cinfo;
    struct jpeg_error_mgr jerr;
    /* libjpeg data structure for storing one row, that is, scanline of an image */
    JSAMPROW row_pointer[1];
    FILE *infile = fopen( filename, "rb" );
    unsigned long location = 0;
    int i = 0;
    if ( !infile )
    {
        printf("Error opening jpeg file %s\n!", filename );
        return -1;
    }
    /* here we set up the standard libjpeg error handler */
    cinfo.err = jpeg_std_error( &jerr );
    /* setup decompression process and source, then read JPEG header */
    jpeg_create_decompress( &cinfo );
    /* this makes the library read from infile */
    jpeg_stdio_src( &cinfo, infile );
    /* reading the image header which contains image information */
    jpeg_read_header( &cinfo, TRUE );
    /* Uncomment the following to output image information, if needed. */
    /* Start decompression jpeg here */
    jpeg_start_decompress( &cinfo );
    /* allocate memory to hold the uncompressed image */
    //raw_image = (unsigned char*)malloc( cinfo.output_width*cinfo.output_height*cinfo.num_components );
    /* now actually read the jpeg into the raw buffer */
    row_pointer[0] = (unsigned char *)malloc( cinfo.output_width*cinfo.num_components );
    /* read one scan line at a time */
    int r[255], g[255], b[255];
    for(i=0;i<255;i++)
    {
        r[i] = 0;
        g[i] = 0;
        b[i] = 0;
    }
    while( cinfo.output_scanline < cinfo.image_height )
    {
        jpeg_read_scanlines( &cinfo, row_pointer, 1 );
        for( i=0; i

先是测试python部分
#time python countrgb.py

(12, 510858)
(17, 429677)
(9, 662996)

real 11m4.009s
user 10m42.200s
sys 0m1.090s

开始测试C的部分
首先优化编译一下
#gcc countrgb.c -l jpeg -O3 -o countrgb
#time ./countrgb 123.jpg
12,510858
17,429677
9,662996

real 0m0.750s
user 0m0.730s
sys 0m0.010s

  两次统计结果相同,说明统计方法没有问题。python用了11分钟,C用了0.75秒,看来真的不是一般的快了。C代码编译后的文件8K,静态编译500多K。不过我是喜欢用python,开发速度快,结构清晰。

10月 16

用Nginx为uwsgi后端图片服务器做缓存服务

  很早处理的问题,现在拿出来做个记录。
  需求:后端图片服务处理各种格式、尺寸的图片。使用python处理,系统为Django+uwsgi(其实直接用tornado更好,也更方便)。前端需要做缓存,直接使用nginx。当前问题nginx和uwsgi连接后就不能使用cache服务了,nginx的proxy cache服务是在upstream的流转发过程中实现。所以要做个内部转发。nginx部分配置如下:

日志部分,添加缓存记录,方便将来统计命中率,参数为upstream_cache_status。
日志的中表示说明
MISS 未命中,请求被传送到后端
HIT 缓存命中
EXPIRED 缓存已经过期请求被传送到后端
UPDATING 正在更新缓存,将使用旧的应答
STALE 后端将得到过期的应答

log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for" "$request_time" '
'"$upstream_cache_status"';

转发的主要部分

# Internal hop: the public server (port 80) proxies to this upstream so
# that proxy_cache can apply -- per the write-up above, nginx caches on
# the proxy_pass hop, not on a direct uwsgi_pass connection.
upstream  local_img {
   server localhost:81;
   }
# Backend server: accepts the internal requests on port 81 and hands
# them to the uwsgi image service on 127.0.0.1:9000.
server{
       listen       81;
       server_name 127.0.0.1;
       location / {
                uwsgi_pass   127.0.0.1:9000;
                client_max_body_size   10m;
                include       uwsgi_params;
                access_log  off;
                autoindex off;
                }
    }

# Public entry point: serves imgtest.simonzhang.net on port 80 and
# caches the image responses fetched from the internal backend.
server {
       listen       80;
       server_name imgtest.simonzhang.net ;
       location / {
                #proxy cache stat
                proxy_redirect          off;
                proxy_set_header        Host            $host;
                proxy_set_header        X-Real-IP       $remote_addr;
                proxy_set_header        X-Forwarded-For $proxy_add_x_forwarded_for;
                proxy_connect_timeout   15;
                proxy_send_timeout      45;
                proxy_read_timeout      45;
                proxy_buffer_size       128k;
                proxy_buffers           4 128k;
                proxy_busy_buffers_size 128k;
                proxy_temp_file_write_size 128k;
                # NOTE(review): the "cache_one" zone must be declared with
                # proxy_cache_path elsewhere in the config -- not shown here.
                proxy_cache cache_one;
                # images served successfully (200) expire after 5 days
                proxy_cache_valid 200 5d;
                proxy_cache_valid 404 304 1m;
                proxy_cache_key $host$uri$is_args$args;
                #add_header X-Cache $upstream_cache_status;
                proxy_pass http://local_img;
                #proxy cache end
                }
    }

  重启nginx服务后查看日志,日志中已经有了HIT的日志,日志显示使用处理耗时0.000ms。收工。

10月 15

无聊的内存操作

  今天一哥们说他内存曲线是均匀锯齿状。这对python太简单了,就是对内存添写和释放就行。我无聊之下写了下面的代码。

# Draw an even sawtooth on the memory graph: alternate between holding a
# ~200 MB buffer for 2 s and a 10 KB buffer for 1 s, 100 times over.
import StringIO
import time

for _ in range(100):
    # BUG FIX: the original used the bare names `a` and `b` to build the
    # payloads (`context = a * 2048 * 100000`), which raises NameError on
    # the first iteration; they were clearly meant to be the one-character
    # string literals 'a' and 'b'.
    buf = StringIO.StringIO('a' * 2048 * 100000)  # large allocation (peak)
    time.sleep(2)
    buf.close()
    buf = StringIO.StringIO('b' * 1024 * 10)      # small allocation (trough)
    time.sleep(1)
    buf.close()

内存图形如下:
内存图形