足球游戏_中国足彩网¥体育资讯$

python实现多线程采集的2个代码例子
来源:易贤网 阅读:913 次 日期:2016-06-24 10:14:58
温馨提示:易贤网小编为您整理了“python实现多线程采集的2个代码例子”,方便广大网友查阅!

这篇文章主要介绍了python多线程采集代码例子,使用了threading、queue、mysqldb等模块,需要的朋友可以参考下。

代码一:

代码如下:

#!/usr/bin/python

# -*- coding: utf-8 -*-

#encoding=utf-8

import threading
import Queue      # Python 2 name of the queue module (site mangled it to `queue`)
import sys
import urllib2
import re

import MySQLdb    # case-sensitive module name (was mangled to `mysqldb`)

#
# Database connection settings.
# Placeholders -- fill in real credentials.  The article text dropped the
# quotes, which would raise NameError at import time.
#
db_host = '127.0.0.1'
db_user = 'xxxx'
db_passwd = 'xxxxxxxx'
db_name = 'xxxx'

#
# Runtime settings
#
thread_limit = 3                  # number of worker threads to spawn
jobs = Queue.Queue(5)             # bounded queue of pending (id, url) jobs
singlelock = threading.Lock()     # serialises printing/shared work across threads
info = Queue.Queue()              # unbounded queue collecting [id, title] results

def workerbee(inputlist):

    for x in xrange(thread_limit):

        print 'thead {0} started.'.format(x)

        t = spider()

        t.start()

    for i in inputlist:

        try:

            jobs.put(i, block=true, timeout=5)

        except:

            singlelock.acquire()

            print the queue is full !

            singlelock.release()

    # wait for the threads to finish

    singlelock.acquire()        # acquire the lock so we can print

    print waiting for threads to finish.

    singlelock.release()        # release the lock

    jobs.join()              # this command waits for all threads to finish.

    # while not jobs.empty():

    #   print jobs.get()

def gettitle(url, time=10):
    """Fetch *url* (timeout *time* seconds) and return the text of its
    first <title> tag, or '' when the page has no title.

    The original indexed ``findall(...)[0]`` unconditionally, raising
    IndexError on title-less pages, and leaked the response object when
    ``read()`` failed.
    """
    response = urllib2.urlopen(url, timeout=time)
    try:
        html = response.read()
    finally:
        response.close()             # close even if read() raises
    matches = re.findall(r'<title>(.*?)</title>', html)
    # title = matches[0].decode('gb2312','replace').encode('utf-8')
    return matches[0] if matches else ''

class spider(threading.Thread):
    """Worker thread: pulls (id, url) jobs off the shared ``jobs`` queue,
    resolves each page title via gettitle(), and pushes [id, title] onto
    the ``info`` queue.  Exits when no job arrives within 1 second."""

    def run(self):
        while 1:
            try:
                # Wait at most 1s for a job; Queue.Empty ends the thread.
                job = jobs.get(True, 1)
            except Exception:
                break
            try:
                singlelock.acquire()
                try:
                    title = gettitle(job[1])
                    info.put([job[0], title], block=True, timeout=5)
                finally:
                    # The original never released the lock on error,
                    # which would deadlock the remaining workers.
                    singlelock.release()
            finally:
                # Always balance the successful get(); otherwise a failed
                # fetch would leave jobs.join() hanging forever.
                jobs.task_done()

if __name__ == '__main__':

    con = none

    urls = []

    try:

        con = mysqldb.connect(db_host,db_user,db_passwd,db_name)

        cur = con.cursor()

        cur.execute('select id,url from `table_name` where `status`=0 limit 10')

        rows = cur.fetchall()

        for row in rows:

            # print row

            urls.append([row[0],row[1]])

        workerbee(urls)

        while not info.empty():

            print info.get()

    finally:

        if con:

            con.close()代码二:

代码如下:

#!/usr/bin/python
# -*- coding: utf-8 -*-
# filename: robot.py
#
# Multi-threaded title scraper (example 2): fetches the <title> of a fixed
# list of URLs with a small pool of worker threads.  Python 2 code; the
# article lower-cased the identifiers, proper casing is restored here.

import threading
import Queue      # Python 2 name of the queue module
import sys
import urllib2
import re

#
# Runtime settings
#
thread_limit = 3                  # number of worker threads
jobs = Queue.Queue(5)             # bounded queue of pending URL jobs
singlelock = threading.Lock()     # serialises printing across threads

urls = ['http://xxx.com/w/n/2013-04-28/1634703505.shtml','http://xxx.com/w/n/2013-04-28/1246703487.shtml','http://xxx.com/w/n/2013-04-28/1028703471.shtml','http://xxx.com/w/n/2013-04-27/1015703426.shtml','http://xxx.com/w/n/2013-04-26/1554703373.shtml','http://xxx.com/w/n/2013-04-26/1512703346.shtml','http://xxx.com/w/n/2013-04-26/1453703334.shtml','http://xxx.com/w/n/2013-04-26/1451703333.shtml','http://xxx.com/w/n/2013-04-26/1445703329.shtml','http://xxx.com/w/n/2013-04-26/1434703322.shtml','http://xxx.com/w/n/2013-04-26/1433703321.shtml','http://xxx.com/w/n/2013-04-26/1433703320.shtml','http://xxx.com/w/n/2013-04-26/1429703318.shtml','http://xxx.com/w/n/2013-04-26/1429703317.shtml','http://xxx.com/w/n/2013-04-26/1409703297.shtml','http://xxx.com/w/n/2013-04-26/1406703296.shtml','http://xxx.com/w/n/2013-04-26/1402703292.shtml','http://xxx.com/w/n/2013-04-26/1353703286.shtml','http://xxx.com/w/n/2013-04-26/1348703284.shtml','http://xxx.com/w/n/2013-04-26/1327703275.shtml','http://xxx.com/w/n/2013-04-26/1239703265.shtml','http://xxx.com/w/n/2013-04-26/1238703264.shtml','http://xxx.com/w/n/2013-04-26/1231703262.shtml','http://xxx.com/w/n/2013-04-26/1229703261.shtml','http://xxx.com/w/n/2013-04-26/1228703260.shtml','http://xxx.com/w/n/2013-04-26/1223703259.shtml','http://xxx.com/w/n/2013-04-26/1218703258.shtml','http://xxx.com/w/n/2013-04-26/1202703254.shtml','http://xxx.com/w/n/2013-04-26/1159703251.shtml','http://xxx.com/w/n/2013-04-26/1139703233.shtml']

def workerbee(inputlist):

  for x in xrange(thread_limit):

    print 'thead {0} started.'.format(x)

    t = spider()

    t.start()

  for i in inputlist:

    try:

      jobs.put(i, block=true, timeout=5)

    except:

      singlelock.acquire()

      print the queue is full !

      singlelock.release()

  # wait for the threads to finish

  singlelock.acquire()    # acquire the lock so we can print

  print waiting for threads to finish.

  singlelock.release()    # release the lock

  jobs.join()       # this command waits for all threads to finish.

  # while not jobs.empty():

  #  print jobs.get()

def gettitle(url, time=10):
    """Fetch *url* (timeout *time* seconds) and return its <title> text,
    transcoded gb2312 -> utf-8, or '' when the page has no title.

    The original indexed ``findall(...)[0]`` unconditionally (IndexError on
    title-less pages) and leaked the response when ``read()`` failed.
    """
    response = urllib2.urlopen(url, timeout=time)
    try:
        html = response.read()
    finally:
        response.close()             # close even if read() raises
    matches = re.findall(r'<title>(.*?)</title>', html)
    if not matches:
        return ''
    # Pages on the target site are gb2312-encoded; normalise to utf-8.
    return matches[0].decode('gb2312', 'replace').encode('utf-8')

class spider(threading.thread):

  def run(self):

    while 1:

      try:

        job = jobs.get(true,1)

        singlelock.acquire()

        title = gettitle(job)

        print 'this {0} is {1}'.format(job,title)

        singlelock.release()

        jobs.task_done()

      except:

        break;

if __name__ == '__main__':
  # Entry point: crawl every URL in the module-level `urls` list.
  workerbee(urls)

中国足彩网信息请查看脚本栏目
上一篇:window窗口操作
由于各方面情况的不断调整与变化,易贤网提供的所有考试信息和咨询回复仅供参考,敬请考生以权威部门公布的正式信息和咨询为准!
关于我们 | 联系我们 | 人才招聘 | 网站声明 | 网站帮助 | 非正式的简要咨询 | 简要咨询须知 | 加入群交流 | 手机站点 | 投诉建议
工业和信息化部备案号:滇ICP备2023014141号-1 足球游戏_中国足彩网¥体育资讯$ 滇公网安备53010202001879号 人力资源服务许可证:(云)人服证字(2023)第0102001523号
云南网警备案专用图标
联系电话:0871-65317125(9:00—18:00) 获取招聘考试信息及咨询关注公众号:hfpxwx
咨询QQ:526150442(9:00—18:00)版权所有:易贤网
云南网警报警专用图标