Webcam, part 3: CMOS OV9650, plugins/input_s3c2410/
First, here are a few important macros and structs from plugins/input_s3c2410/s3c2410.h.
/* in case default setting */
#define WIDTH 1280
#define HEIGHT 1024
#define BPPIN 8
#define OUTFRMNUMB 1
#define NB_BUFFER 4

struct frame_t {
    char header[5];
    int nbframe;              /* sequence number of the current frame */
    double seqtimes;          /* timestamp taken when conversion of this frame finished */
    int deltatimes;           /* time from the start of the read until conversion finished */
    int w;
    int h;
    int size;                 /* size of the converted image, i.e. the return value of convertframe() */
    int format;               /* format of the current frame */
    unsigned short bright;
    unsigned short contrast;
    unsigned short colors;
    unsigned short exposure;
    unsigned char wakeup;
    int acknowledge;
} __attribute__ ((packed));
/*
This struct describes an image frame (the raw data after conversion), as these fragments of s3c2410_Grab() show:

    struct frame_t *headerframe;
    jpegsize = convertframe(vd->ptframe[vd->frame_cour] + sizeof(struct frame_t),
                            vd->pFramebuffer, vd->hdrwidth, vd->hdrheight,
                            vd->formatIn, qualite, vd->framesizeIn);
    headerframe = (struct frame_t *)vd->ptframe[vd->frame_cour];
    headerframe->seqtimes = ms_time();
    headerframe->deltatimes = (int)(headerframe->seqtimes - timecourant);
    headerframe->w = vd->hdrwidth;
    headerframe->h = vd->hdrheight;
    headerframe->size = ((jpegsize < 0) ? 0 : jpegsize);
    headerframe->format = vd->formatIn;
    headerframe->nbframe = frame++;
*/

struct vdIn {
    int fd;
    char *videodevice;                  /* device name */
    unsigned char *pFramebuffer;        /* raw data read from the driver; vd->pFramebuffer = (unsigned char *)malloc((size_t)vd->framesizeIn); */
    unsigned char *ptframe[OUTFRMNUMB]; /* destination buffers passed to convertframe(); since OUTFRMNUMB is 1, only ptframe[0] exists */
    unsigned char *mem[NB_BUFFER];
    int framelock[OUTFRMNUMB];
    pthread_mutex_t grabmutex;          /* protects vd against concurrent grab threads; in practice there is only one grab thread, so it looks like it could be dropped */
    int framesizeIn;                    /* bytes per frame, i.e. how much is read from the driver each time; vd->framesizeIn = width*height*2 (RGB565) */
    volatile int frame_cour;            /* index of the current frame; always 0 because OUTFRMNUMB is 1 */
    int bppIn;
    int hdrwidth;                       /* image width */
    int hdrheight;                      /* image height */
    int formatIn;
    int signalquit;
    struct v4l2_capability cap;
    struct v4l2_format fmt;
    struct v4l2_buffer buf;
    struct v4l2_requestbuffers rb;
    int grayscale;                      /* grayscale flag */
    uint32_t quality;                   /* image quality, passed to convertframe() when a raw frame is encoded */
};
/* this struct holds the raw data read from the device */
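To make the buffer layout concrete: each vd->ptframe[i] buffer holds a struct frame_t header followed immediately by the encoded JPEG data, which is why the code keeps adding sizeof(struct frame_t) to the pointer. The following is only a minimal sketch, not mjpg_streamer code, of how a consumer could read such a buffer; the function name parse_frame is made up, and the struct frame_t definition above is assumed.

/* Sketch (not project code) of reading one ptframe[] buffer laid out as
 * [struct frame_t header][JPEG data of header->size bytes].
 * Assumes the struct frame_t definition from s3c2410.h above. */
#include <stdio.h>

static void parse_frame(unsigned char *ptframe)
{
    struct frame_t *hdr = (struct frame_t *)ptframe;          /* header at the start of the buffer */
    unsigned char *jpeg = ptframe + sizeof(struct frame_t);   /* JPEG data follows the header */

    printf("frame %d: %dx%d, %d bytes, took %d ms\n",
           hdr->nbframe, hdr->w, hdr->h, hdr->size, hdr->deltatimes);

    if (hdr->size > 0) {
        /* jpeg[0 .. hdr->size-1] now holds the encoded image */
        (void)jpeg;
    }
}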
In plugins/input_s3c2410/input_s3c2410.c a few pointers are declared; they are global, but only used inside this input plugin (the input_s3c2410 directory):

#define INPUT_PLUGIN_NAME "S3C2410 embedded camera"
#define MAX_ARGUMENTS 32

/* private functions and variables to this plugin */
pthread_t cam;            /* id of the thread that reads data from the device */
struct vdIn *videoIn;
static globals *pglobal;  /* will point to the variable "global" defined in mjpg_streamer.c, so this plugin can access the global buffer */
If mjpg_streamer is started with the following command:

./mjpg_streamer -o "output_http.so -w ./www" -i "input_s3c2410.so -d /dev/camera"

then the two calls in mjpg_streamer.c

global.in.init(&global.in.param)
global.in.run()

execute, respectively, these functions in input_s3c2410.c:

int input_init(input_parameter *param)   // param.parameter_string = "-d /dev/camera"
int input_run(void)
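For context, the main program binds these plugin entry points at runtime. The sketch below shows the general dlopen()/dlsym() pattern behind global.in.init / global.in.run; the function name load_input_plugin and the simplified error handling are illustrative, not the exact code of mjpg_streamer.c.

/* Sketch of how a plugin's entry points are typically resolved with dlopen()/dlsym().
 * Names and error handling are simplified, not copied from mjpg_streamer.c. */
#include <dlfcn.h>
#include <stdio.h>

int load_input_plugin(const char *so_path)
{
    void *handle = dlopen(so_path, RTLD_LAZY);   /* e.g. "input_s3c2410.so" */
    if (handle == NULL) {
        fprintf(stderr, "dlopen: %s\n", dlerror());
        return -1;
    }

    /* look up the symbols the plugin is required to export
       (void* used here as a stand-in for input_parameter*) */
    int (*init)(void *);
    int (*run)(void);
    *(void **)(&init) = dlsym(handle, "input_init");
    *(void **)(&run)  = dlsym(handle, "input_run");
    if (init == NULL || run == NULL) {
        fprintf(stderr, "plugin is missing input_init/input_run\n");
        dlclose(handle);
        return -1;
    }

    /* the real code stores these pointers in global.in and later calls them
       with &global.in.param */
    (void)init;
    (void)run;
    return 0;
}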
Search the code comments below for "see below" to follow the call chain.
***********************************************************init***************************************************************************
In input_s3c2410.c, the source of input_init is as follows:
/******************************************************************************
Description.: This function initializes the plugin. It parses the commandline-
              parameter and stores the default and parsed values in the
              appropriate variables.
Input Value.: param contains among others the command-line string
Return Value: 0 if everything is fine
              1 if "--help" was triggered, in this case the calling programm
                should stop running and leave.
******************************************************************************/
int input_init(input_parameter *param)
{
  char *dev = "/dev/video0", *s;   /* the default device name is "/dev/video0" */
  int width=640, height=512, i;
  int argc=1;
  char *argv[MAX_ARGUMENTS]={NULL};
  uint32_t jpg_quality=1024;
  int grayscale=0;

  /* convert the single parameter-string to an array of strings */
  argv[0] = INPUT_PLUGIN_NAME;
  if ( param->parameter_string != NULL && strlen(param->parameter_string) != 0 )
  {
    char *arg=NULL, *saveptr=NULL, *token=NULL;

    arg=(char *)strdup(param->parameter_string);

    if ( strchr(arg, ' ') != NULL )
    {
      token=strtok_r(arg, " ", &saveptr);
      if ( token != NULL )
      {
        argv[argc] = strdup(token);
        argc++;
        while ( (token=strtok_r(NULL, " ", &saveptr)) != NULL )
        {
          argv[argc] = strdup(token);
          argc++;
          if (argc >= MAX_ARGUMENTS)
          {
            IPRINT("ERROR: too many arguments to input plugin\n");
            return 1;
          }
        }
      }
    }
  }

  /* show all parameters for DBG purposes */
  for (i=0; i<argc; i++)
  {
    DBG("argv[%d]=%s\n", i, argv[i]);
    /*
      With -i "input_s3c2410.so -d /dev/camera" this prints:
      DBG(input_s3c2410.c, input_init(), 95): argv[0]=S3C2410 embedded camera
      DBG(input_s3c2410.c, input_init(), 95): argv[1]=-d
      DBG(input_s3c2410.c, input_init(), 95): argv[2]=/dev/camera
    */
  }

  /* parse the parameters */
  reset_getopt();
  while(1)
  {
    int option_index = 0, c=0;
    struct option long_options[] = \
    {
      {"help", no_argument, 0, 'h'},
      {"device", required_argument, 0, 'd'},
      {"resolution", required_argument, 0, 'r'},
      {"quality", required_argument, 0, 'q'},
      {"grayscale", no_argument, 0, 'g'},
      {0, 0, 0, 0}
    };

    c = getopt_long(argc, argv, "hd:r:q:g", long_options, &option_index);

    /* no more options to parse */
    if (c == -1) break;

    /* dispatch the given options */
    switch (c)
    {
      /* d, device */
      case 'd':
        DBG("case d\n");
        dev = strdup(optarg);
        break;

      /* g, grayscale; default 0 */
      case 'g':
        grayscale=1;
        break;

      /* r, resolution, e.g. -r 320x240; default 640x512 */
      case 'r':
        DBG("case r\n");
        width = -1;
        height = -1;

        /* parse value as decimal value */
        width = strtol(optarg, &s, 10);
        height = strtol(s+1, NULL, 10);
        break;

      /* q, quality; default 1024 */
      case 'q':
        DBG("case q\n");
        jpg_quality = atoi(optarg);
        break;

      /* h, help */
      case 'h':
      default:
        DBG("default case, h\n");
        help();
        return 1;
    }
  }

  /* keep a pointer to the global variables */
  pglobal = param->global;

  /* allocate webcam datastructure */
  videoIn = malloc(sizeof(struct vdIn));   /* allocate memory for the plugin-global pointer videoIn */
  if ( videoIn == NULL )
  {
    IPRINT("not enough memory for videoIn\n");
    exit(EXIT_FAILURE);
  }
  memset(videoIn, 0, sizeof(struct vdIn));   /* zero it */

  DBG("initializing s3c2410 device\n");

  /* display the parsed values */
  IPRINT("Using V4L2 device.: %s\n", dev);
  IPRINT("Desired Resolution: %i x %i\n", width, height);
  IPRINT("Grayscale mode: %s\n", grayscale?"on":"off");

  videoIn->grayscale=grayscale;
  videoIn->quality=jpg_quality;

  /* open video device and prepare data structure */
  if (init_s3c2410 (videoIn, dev, width, height) != 0)   /* call the actual init function to set up the device, see below */
  {
    IPRINT("init_s3c2410 failed\n");
    closelog();
    exit(EXIT_FAILURE);
  }

  return 0;
}

The source of init_s3c2410(videoIn, dev, width, height) can be seen in plugins/input_s3c2410/s3c2410.c.
All functions that open, read, and otherwise operate on the device file /dev/camera live in this file.
init_s3c2410() takes:
argument 1: a pointer, struct vdIn *vd
argument 2: the device name
arguments 3 and 4: the image width and height
int init_s3c2410 (struct vdIn *vd, char *device, int width, int height)
{
  int err = -1;
  int f;
  int i;

  if (vd == NULL || device == NULL)
    return -1;
  if (width == 0 || height == 0)
    return -1;

  vd->videodevice = strdup(device);       /* keep the device name so it can be open()ed */
  vd->framesizeIn = width * height * 2;   /* RGB565 */
  vd->hdrwidth = width;
  vd->hdrheight = height;
  //printf("Allocating frame:%dx%d\n",width,height);
  vd->pFramebuffer = (unsigned char *) malloc ((size_t) vd->framesizeIn);

  /* just in case */
  vd->formatIn = 0;

  DBG("Opening device\n");
  if ((vd->fd = open(vd->videodevice, O_RDWR)) == -1)   /* open the device, /dev/camera */
    exit_fatal ("ERROR opening V4L interface");

  DBG("Allocating input buffers\n");
  /* allocate the 4 frames output buffer */
  for (i = 0; i < OUTFRMNUMB; i++)
  {
    vd->ptframe[i] = NULL;
    vd->ptframe[i] = (unsigned char *) malloc ((size_t) vd->framesizeIn + sizeof(struct frame_t));
    vd->framelock[i] = 0;
  }
  vd->frame_cour = 0;
  pthread_mutex_init (&vd->grabmutex, NULL);   /* initialize the mutex that will be used later */
  printf("Allocated\n");
  return 0;
}
***********************************************************run**************************************************************************
In plugins/input_s3c2410/input_s3c2410.c, the source of input_run is as follows:
/******************************************************************************
Description.: spins of a worker thread
Input Value.: -
Return Value: always 0
******************************************************************************/
int input_run(void)
{
  pglobal->buf = malloc(videoIn->framesizeIn);
  if (pglobal->buf == NULL)
  {
    fprintf(stderr, "could not allocate memory\n");
    exit(EXIT_FAILURE);
  }

  pthread_create(&cam, 0, cam_thread, NULL);   /* create the worker thread; its thread function is shown below */
  pthread_detach(cam);

  return 0;
}

Notes:
1. This runs once. mjpg_streamer creates a thread that reads data from the device, processes it, and stores the result in the global variable global, regardless of whether any client is reading from global.
2. pthread_detach(cam) detaches the thread. Its job is to move data from the device into the global variable; it returns nothing to the main thread, so the main thread has no need to wait on it. Being detached, the thread's resources are reclaimed automatically by the system when it terminates (when? for example when the main process exits, as with kill xxx, or when pglobal->stop == 1). A minimal sketch of this pattern is shown right below.
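The following is a self-contained sketch of the same create-then-detach worker pattern: a producer thread is created, detached, and loops until a shared stop flag is set. The names (worker_thread, stop) are illustrative; this is not mjpg_streamer code.

/* Minimal sketch of the create-then-detach worker pattern used by input_run().
 * Names are illustrative, not taken from mjpg_streamer. */
#include <pthread.h>
#include <unistd.h>

static volatile int stop = 0;

static void *worker_thread(void *arg)
{
    (void)arg;
    while (!stop) {
        /* grab a frame, copy it somewhere shared ... */
        usleep(100 * 1000);
    }
    return NULL;   /* nobody joins this thread; its resources are freed on exit */
}

int main(void)
{
    pthread_t worker;
    pthread_create(&worker, NULL, worker_thread, NULL);
    pthread_detach(worker);   /* main never calls pthread_join() on it */

    sleep(1);
    stop = 1;                 /* ask the worker to finish */
    sleep(1);                 /* give it time to notice; a real program would synchronize properly */
    return 0;
}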
In plugins/input_s3c2410/input_s3c2410.c, the thread function cam_thread is as follows:
/******************************************************************************
Description.: this thread worker grabs a frame and copies it to the global buffer
Input Value.: unused
Return Value: unused, always NULL
******************************************************************************/
void *cam_thread( void *arg )
{
  int iframe = 0;
  unsigned char *pictureData = NULL;
  struct frame_t *headerframe;
  int r;

  /* set cleanup handler to cleanup allocated ressources */
  pthread_cleanup_push(cam_cleanup, NULL);

  while( !pglobal->stop )
  {
    /* grab a frame */
    r = s3c2410_Grab( videoIn );   /* grab one raw frame from the driver and encode it, see below */
    if( r < 0 )
    {
      IPRINT("Error grabbing frames\n");
      exit(EXIT_FAILURE);
    }
    if( !r )   /* not captured */
    {
      //sleep(0);
      pthread_yield();
      continue;
    }

    iframe = (videoIn->frame_cour + (OUTFRMNUMB-1)) % OUTFRMNUMB;
    /* circular index; since OUTFRMNUMB is 1, iframe is always 0 */

    videoIn->framelock[iframe]++;   /* this ++ and the -- below do not seem to do much; they could be commented out */
    headerframe = (struct frame_t*)videoIn->ptframe[iframe];          /* points to the converted frame header */
    pictureData = videoIn->ptframe[iframe] + sizeof(struct frame_t);  /* points to the data area */
    videoIn->framelock[iframe]--;

    /* copy JPG picture to global buffer */
    pthread_mutex_lock( &pglobal->db );
    /* lock the mutex before writing, so client threads cannot read at the same time;
       if a client thread currently holds the mutex, this thread blocks until the lock is free */

    pglobal->size = get_jpegsize(pictureData, headerframe->size);
    memcpy(pglobal->buf, pictureData, pglobal->size);
    /* global variables: data size in pglobal->size, data in pglobal->buf */

    /* signal fresh_frame */
    pthread_cond_broadcast(&pglobal->db_update);   /* signal "new data available" */
    pthread_mutex_unlock( &pglobal->db );          /* release the mutex */
  }

  DBG("leaving input thread, calling cleanup function now\n");
  pthread_cleanup_pop(1);

  return NULL;
}

/*
In plugins/input_s3c2410/utils.c:

int get_jpegsize (unsigned char *buf, int insize)
{
  int i;
  for ( i = 1024; i < insize; i++ )
  {
    if ((buf[i] == 0xFF) && (buf[i+1] == 0xD9))
      return i+10;     // 0xFF 0xD9 is the JPEG end-of-image marker
  }
  return -1;
}
*/
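For contrast with the producer side above, the sketch below shows how a consumer (e.g. an output plugin) would typically wait on pglobal->db_update for a fresh frame and copy pglobal->buf under the same mutex. This is only the general condition-variable pattern, not the exact code of output_http.so; the names consume_frames, frame_copy and frame_size are made up.

/* Sketch of the consumer side of the producer/consumer handshake above.
 * Assumes the same globals struct (db mutex, db_update condvar, buf, size, stop);
 * local names are illustrative, not mjpg_streamer code. */
#include <pthread.h>
#include <string.h>
#include <stdlib.h>

void consume_frames(globals *pglobal)
{
    unsigned char *frame_copy = malloc(256 * 1024);   /* assumed big enough for one JPEG */
    int frame_size;

    while (!pglobal->stop) {
        pthread_mutex_lock(&pglobal->db);
        pthread_cond_wait(&pglobal->db_update, &pglobal->db);   /* sleep until cam_thread broadcasts */
        /* a stricter consumer would re-check a predicate here to guard against spurious wakeups */

        frame_size = pglobal->size;                    /* copy out while holding the lock */
        memcpy(frame_copy, pglobal->buf, frame_size);

        pthread_mutex_unlock(&pglobal->db);

        /* ... send frame_copy (frame_size bytes) to the client ... */
    }
    free(frame_copy);
}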
In plugins/input_s3c2410/s3c2410.c, the source of s3c2410_Grab is as follows; this is the key part that reads data from the device:
int s3c2410_Grab (struct vdIn *vd )
{
  static int frame = 0;        /* counts how many frames have been grabbed */
  int len;
  int size;
  int err = 0;
  int jpegsize = 0;
  int qualite = 1024;
  struct frame_t *headerframe;
  double timecourant = 0;
  double temps = 0;            /* time spent converting the raw data into image format, in ms */

  timecourant = ms_time();     /* about to start reading, note the current time */

  /* read method */
  size = vd->framesizeIn;      /* number of bytes to read from the driver; vd->framesizeIn = width*height*2 */

  do   /* loop until data is read; if the driver is slow to capture from the hardware,
          this thread may spend most of its time spinning here */
  {
    len = read (vd->fd, vd->pFramebuffer, size);   /* read size bytes of raw data from the driver into vd->pFramebuffer */
    if( !len )      /* not yet ready */
      sched_yield();   /* the driver has no data yet: voluntarily yield the CPU, i.e. ask the OS to put this thread
                          at the end of the run queue; this does not block, the thread is not moved to a wait queue */
  } while(!len);

  if(len < 0)
  {
    printf ("2440 read error\n");
    return -1;
  }

  /* Is there someone using the frame */
  while((vd->framelock[vd->frame_cour] != 0) && vd->signalquit)
    usleep(1000);

  pthread_mutex_lock (&vd->grabmutex);
  /* lock before writing: this protects the plugin-global struct vd against concurrent access.
     Arguably it is unnecessary, since from start to finish only this one thread ever touches this data;
     the socket threads never go near it. */

  /*
  memcpy (vd->ptframe[vd->frame_cour] + sizeof(struct frame_t), vd->pFramebuffer, vd->framesizeIn);
  jpegsize = jpeg_compress(vd->ptframe[vd->frame_cour] + sizeof(struct frame_t), len,
                           vd->pFramebuffer, vd->hdrwidth, vd->hdrheight, qualite);
  */
  temps = ms_time();
  jpegsize = convertframe(vd->ptframe[vd->frame_cour] + sizeof(struct frame_t),   /* convert and encode the raw data, see below */
                          vd->pFramebuffer,
                          vd->hdrwidth,
                          vd->hdrheight,
                          vd->formatIn,
                          qualite,
                          vd->framesizeIn);
  /*
  The prototype is
    int convertframe(unsigned char *dst, unsigned char *src, int width, int height,
                     int formatIn, int qualite, int buf_size)
  so the raw data in vd->pFramebuffer is converted into image data, pointed to by e.g. vd->ptframe[0].
  */

  headerframe = (struct frame_t*)vd->ptframe[vd->frame_cour];   /* the real frame header points to the converted data */
  snprintf(headerframe->header, 5, "%s", "2410");
  headerframe->seqtimes = ms_time();                            /* reading and conversion done, note the current time */
  headerframe->deltatimes = (int)(headerframe->seqtimes - timecourant);   /* time from the start of the read until encoding finished */
  headerframe->w = vd->hdrwidth;
  headerframe->h = vd->hdrheight;
  headerframe->size = ((jpegsize < 0) ? 0 : jpegsize);
  headerframe->format = vd->formatIn;
  headerframe->nbframe = frame++;

  DBG("compress frame %d times %f\n", frame, headerframe->seqtimes - temps);
  /*
  This prints only the conversion time, not the time spent reading from the driver, so it is fairly stable.
  It is what keeps scrolling in the terminal, e.g.
                                                       frame no.    time in ms
  DBG(s3c2410.c, s3c2410_Grab(), 172): compress frame 34599 times 175.409058
  DBG(s3c2410.c, s3c2410_Grab(), 172): compress frame 34600 times 176.015015
  DBG(s3c2410.c, s3c2410_Grab(), 172): compress frame 34601 times 175.151001
  DBG(s3c2410.c, s3c2410_Grab(), 172): compress frame 34602 times 175.377930
  */

  vd->frame_cour = (vd->frame_cour + 1) % OUTFRMNUMB;   /* #define OUTFRMNUMB 1 */
  /*
  Advance the current-frame index. The author reads one raw frame from the driver into vd->pFramebuffer,
  converts it, points vd->ptframe[vd->frame_cour] at the result, and increments vd->frame_cour per frame.
  Since OUTFRMNUMB is 1, vd->ptframe[vd->frame_cour] is always vd->ptframe[0].
  headerframe is a local variable here and always points to vd->ptframe[0]; it records the converted
  frame's metadata, which can be printed for debugging. In cam_thread(), the caller of s3c2410_Grab(),
  there is also a local headerframe pointing to vd->ptframe[0], but what matters there is pictureData,
  which is the source feeding the global buf and size; see cam_thread().
  */

  pthread_mutex_unlock (&vd->grabmutex);
  /************************************/
  return jpegsize;
}
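The read loop above busy-waits with sched_yield() whenever the driver has no data. If the camera driver implemented poll(), the same wait could be expressed as a blocking wait instead of spinning; the following is only a sketch of that alternative, under the assumption that the driver supports the poll file operation (the driver discussed here may not).

/* Sketch only: a blocking alternative to the sched_yield() spin above, assuming the
 * camera driver implements poll(). It just illustrates the contrast with busy-waiting. */
#include <poll.h>
#include <unistd.h>

static int read_frame_blocking(int fd, unsigned char *buf, int size)
{
    struct pollfd pfd = { .fd = fd, .events = POLLIN };
    int len = 0;

    while (len == 0) {
        if (poll(&pfd, 1, -1) < 0)     /* sleep until the driver reports data (or an error) */
            return -1;
        len = read(fd, buf, size);     /* now the read should return data immediately */
        if (len < 0)
            return -1;
    }
    return len;
}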
In plugins/input_s3c2410/s3c2410.c, convertframe:
int convertframe(unsigned char *dst, unsigned char *src,
                 int width, int height, int formatIn, int qualite, int buf_size)
{
  int ret = 0;
  //unsigned char *tmp = malloc(width*height*2);

  RGB565_2_YCbCr420(src, src, width, height);   /* in-place conversion, see below */

  ret = s_encode_image(src, dst, qualite, FORMAT_CbCr420, width, height, buf_size);   /* JPEG encoding, see below */

  //free(tmp);
  return ret;
}
In simplified_jpeg_encoder.c in the top-level directory, RGB565_2_YCbCr420:
/* translate RGB565 to YUV420 in input */
void RGB565_2_YCbCr420(uint8_t * input_ptr, uint8_t * output_ptr,
                       uint32_t image_width, uint32_t image_height)
{
  uint32_t i, j, size;
  uint8_t R, G, B, R1, G1, B1, Rd, Gd, Bd, Rd1, Gd1, Bd1;
  S_INT Y, Yd, Y11, Yd1, Cb, Cr;

  S_JPEG_RGB16 * inbuf = (S_JPEG_RGB16 *) input_ptr;
  S_JPEG_RGB16 * inbuf1 = inbuf + (image_width);   /* the row below the current one */

  size = image_width * image_height >> 2;          /* one iteration handles a 2x2 block of pixels */

  for (i = size, j = 0; i > 0; i--)
  {
    B  = inbuf[0].blue  << 3;    /* expand 5-bit blue to 8 bits */
    G  = inbuf[0].green << 2;    /* expand 6-bit green to 8 bits */
    R  = inbuf[0].red   << 3;    /* expand 5-bit red to 8 bits */
    B1 = inbuf[1].blue  << 3;
    G1 = inbuf[1].green << 2;
    R1 = inbuf[1].red   << 3;
    Bd = inbuf1[0].blue  << 3;
    Gd = inbuf1[0].green << 2;
    Rd = inbuf1[0].red   << 3;
    Bd1 = inbuf1[1].blue << 3;
    Gd1 = inbuf[1].green << 2;   /* note: reads inbuf[1] rather than inbuf1[1]; this looks like a bug in the original source, left as-is */
    Rd1 = inbuf[1].red   << 3;

    inbuf  += 2;
    inbuf1 += 2;
    j++;
    if (j >= image_width / 2)    /* end of a row of pixel pairs: skip the row that inbuf1 already covered */
    {
      j = 0;
      inbuf  += (image_width);
      inbuf1 += (image_width);
    }

    /* fixed-point BT.601-style conversion, coefficients scaled by 256 */
    Y   = CLIP((77 * R   + 150 * G   + 29 * B  ) >> 8);
    Y11 = CLIP((77 * R1  + 150 * G1  + 29 * B1 ) >> 8);
    Yd  = CLIP((77 * Rd  + 150 * Gd  + 29 * Bd ) >> 8);
    Yd1 = CLIP((77 * Rd1 + 150 * Gd1 + 29 * Bd1) >> 8);
    Cb  = CLIP(((-43 * R - 85 * G + 128 * B) >> 8) + 128);   /* chroma sampled from the top-left pixel only (4:2:0) */
    Cr  = CLIP(((128 * R - 107 * G - 21 * B) >> 8) + 128);

    *output_ptr++ = (uint8_t) Y;
    *output_ptr++ = (uint8_t) Y11;
    *output_ptr++ = (uint8_t) Yd;
    *output_ptr++ = (uint8_t) Yd1;
    *output_ptr++ = (uint8_t) Cb;
    *output_ptr++ = (uint8_t) Cr;
  }
}
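To make the fixed-point arithmetic concrete, here is a tiny standalone example (not project code) that converts one RGB565 pixel to Y/Cb/Cr with the same coefficients; the helper clip() is a local stand-in for the project's CLIP macro.

/* Standalone illustration of the per-pixel math above; clip() stands in for CLIP. */
#include <stdint.h>
#include <stdio.h>

static int clip(int v) { return v < 0 ? 0 : (v > 255 ? 255 : v); }

int main(void)
{
    uint16_t px = 0xF904;              /* an arbitrary RGB565 pixel: rrrrrggggggbbbbb */

    int R = ((px >> 11) & 0x1F) << 3;  /* expand 5-bit red to 8 bits */
    int G = ((px >> 5)  & 0x3F) << 2;  /* expand 6-bit green to 8 bits */
    int B = ( px        & 0x1F) << 3;  /* expand 5-bit blue to 8 bits */

    int Y  = clip((77 * R + 150 * G + 29 * B) >> 8);          /* ~0.299R + 0.587G + 0.114B */
    int Cb = clip(((-43 * R - 85 * G + 128 * B) >> 8) + 128);
    int Cr = clip(((128 * R - 107 * G - 21 * B) >> 8) + 128);

    printf("R=%d G=%d B=%d -> Y=%d Cb=%d Cr=%d\n", R, G, B, Y, Cb, Cr);
    return 0;
}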
Also in simplified_jpeg_encoder.c in the top-level directory, s_encode_image:
uint32_t s_encode_image(uint8_t * input_ptr, uint8_t * output_ptr,
                        uint32_t quality_factor, int image_format,
                        uint32_t image_width, uint32_t image_height,
                        uint32_t output_buffer_size)
{
  S_UINT i, j;
  S_UINT last_col;
  S_UINT last_row;
  uint8_t * output;
  S_JPEG_ENCODER_STRUCTURE JpegStruct;
  S_JPEG_ENCODER_STRUCTURE * enc = &JpegStruct;
  /*(S_JPEG_ENCODER_STRUCTURE *)malloc(sizeof(S_JPEG_ENCODER_STRUCTURE));
  memset(enc,0,sizeof(S_JPEG_ENCODER_STRUCTURE));*/

  output = output_ptr;

  /* Initialization of JPEG control structure */
  initialization(enc, image_format, image_width, image_height);

  /* Quantization Table Initialization */
  initialize_quantization_tables(enc, quality_factor);

  /* Writing Marker Data */
  output_ptr = write_markers(enc, output_ptr, image_format, image_width, image_height);

  last_row = enc->vertical_mcus - 1;
  last_col = enc->horizontal_mcus - 1;

  for (i = 0; i < enc->vertical_mcus; i++)
  {
    if (i < last_row)
      enc->rows = enc->mcu_height;
    else
      enc->rows = enc->rows_in_bottom_mcus;

    for (j = 0; j < enc->horizontal_mcus; j++)
    {
      if (j < last_col)
      {
        enc->cols = enc->mcu_width;
        enc->scan_line_incr = enc->length_minus_mcu_width;
      }
      else
      {
        enc->cols = enc->cols_in_right_mcus;
        enc->scan_line_incr = enc->length_minus_width;
      }

      enc->read_format(enc, input_ptr, i, j);

      /* Encode the data in MCU */
      output_ptr = encodeMCU(enc, image_format, output_ptr);

      //input_ptr += enc->mcu_width_size;
    }
    //input_ptr += enc->mcu_line_offset;
  }

  /* Close Routine */
  output_ptr = close_bitstream(enc, output_ptr);
  //free(enc);
  return (uint32_t)(output_ptr - output);
}