mirror of git://projects.qi-hardware.com/openwrt-packages.git
synced 2024-11-26 03:35:55 +02:00

new package: icarus python miner software

This commit is contained in:
parent cc8b88b103
commit f8417fdf94

45  icarus-miner/Makefile  Normal file
@@ -0,0 +1,45 @@
#
# This is free software, licensed under the GNU General Public License v2.
# See /LICENSE for more information.

# The ipk files below can be found under the URL that follows; so far
# these Python packages are built for ar71xx (tl-wr1043nd):
#   pyserial_2.4-1_ar71xx.ipk
#   python_2.6.4-3_ar71xx.ipk
#   python-mini_2.6.4-3_ar71xx.ipk
# https://github.com/ngzhang/Icarus/tree/master/miner_software

# For more info, please check:
# http://en.qi-hardware.com/wiki/Icarus#Using_TP-link.2Ftl-wr1043nd_as_host

include $(TOPDIR)/rules.mk

PKG_NAME:=icarus-miner
PKG_VERSION:=20120209

include $(INCLUDE_DIR)/package.mk

define Package/icarus-miner
  PKGARCH:=all
  MAINTAINER:="Xiangfu Liu" <xiangfu@sharism.cc>
  TITLE:=icarus miner software
  SECTION:=utils
  CATEGORY:=Utilities
  URL:=http://en.qi-hardware.com/wiki/Icarus
  DEPENDS:=+librt +libpthread +libffi +zlib +kmod-usb-serial-pl2303
endef

define Build/Compile
endef

define Package/icarus-miner/install
	$(CP) ./data/* $(1)/
endef

define Package/icarus-miner/postinst
#!/bin/sh
crontab /root/crontab.icarus
/etc/init.d/cron enable
/etc/init.d/cron start
endef

$(eval $(call BuildPackage,icarus-miner))
1  icarus-miner/data/root/crontab.icarus  Normal file
@@ -0,0 +1 @@
*/10 * * * * cd /root/scripts && ./icarus_monitor.sh
323  icarus-miner/data/root/queue_ver/miner.py  Executable file
@@ -0,0 +1,323 @@
#!/usr/bin/env python

# by teknohog

# Python wrapper for my serial port FPGA Bitcoin miners

from jsonrpc import ServiceProxy
from time import ctime, sleep, time
from serial import Serial
from threading import Thread, Event, Lock
from Queue import Queue
from optparse import OptionParser

def stats(count, starttime):
    # 2**32 hashes per share (difficulty 1)
    mhshare = 4294.967296

    s = sum(count)
    tdelta = time() - starttime
    rate = s * mhshare / tdelta

    # This is only a rough estimate of the true hash rate,
    # particularly when the number of events is low. However, since
    # the events follow a Poisson distribution, we can estimate the
    # standard deviation (sqrt(n) for n events). Thus we get some idea
    # on how rough an estimate this is.

    # s should always be positive when this function is called, but
    # checking for robustness anyway
    if s > 0:
        stddev = rate / s**0.5
    else:
        stddev = 0

    return "[%i accepted, %i failed, %.2f +/- %.2f Mhash/s]" % (count[0], count[1], rate, stddev)

class Reader(Thread):
    def __init__(self):
        Thread.__init__(self)

        self.daemon = True

        # flush the input buffer
        #ser.read(1000)

    def run(self):
        while True:
            nonce = ser.read(4)

            if len(nonce) == 4:
                # Keep this order, because writer.block will be
                # updated due to the golden event.
                submitter = Submitter(writer.block, nonce)
                submitter.start()
                if options.debug:
                    print("raise golden event\n")
                golden.set()


class Writer(Thread):
    def __init__(self):
        Thread.__init__(self)

        # Keep something sensible available while waiting for the
        # first getwork
        #self.block = "0" * 256
        #self.midstate = "0" * 64

        # This will produce nonce 063c5e01 -> debug by using a bogus URL
        self.block = "0000000120c8222d0497a7ab44a1a2c7bf39de941c9970b1dc7cdc400000079700000000e88aabe1f353238c668d8a4df9318e614c10c474f8cdf8bc5f6397b946c33d7c4e7242c31a098ea500000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000"
        self.midstate = "33c5bf5751ec7f7e056443b5aee3800331432c83f404d9de38b94ecbf907b92d"

        self.daemon = True

    def run(self):
        while True:
            result = 0
            #try:
            #    work = bitcoin.getwork()
            #    self.block = work['data']
            #    self.midstate = work['midstate']
            #except:
            #    print("RPC getwork error")
            # In this case, keep crunching with the old data. It will get
            # stale at some point, but it's better than doing nothing.

            # Just a reminder of how Python slices work in reverse
            #rdata = self.block.decode('hex')[::-1]
            #rdata2 = rdata[32:64]
            work = wq.read_work_queue()
            self.block = work['data']
            self.midstate = work['midstate']
            #print("push work to miner")
            rdata2 = self.block.decode('hex')[95:63:-1]

            rmid = self.midstate.decode('hex')[::-1]

            payload = rmid + rdata2

            ser.write(payload)
            result = golden.wait(options.askrate)

            if result:
                golden.clear()
                if options.debug:
                    print("clear golden event")

class WorkQueue:
    def __init__(self, max_num):
        self.max_num = max_num + 1
        self.ptr = 0
        self.ptr_tobe = 0
        self.tail = 0
        self.in_wr = 0
        self.work = {}
        self.work_queue = []
        for i in range(self.max_num):
            self.work_queue.append({})
    def get_from_server(self):
        get_success = 0
        while get_success != 1:
            try:
                self.work_queue[self.ptr_tobe] = bitcoin.getwork()
                get_success = 1
            except:
                print("RPC getwork error")
    def is_full(self):
        ptr_mutex.acquire()
        full = (self.ptr + 1) % self.max_num == self.tail
        ptr_mutex.release()
        return full
    def write_work_queue(self):
        #print("update work queue")

        ptr_mutex.acquire()
        if (self.ptr + 1) % self.max_num == self.tail:
            if options.debug:
                print("Queue is full")
            self.tail = (self.tail + 1) % self.max_num
        self.ptr_tobe = (self.ptr + 1) % self.max_num
        #print("write0:tail=", self.tail, "ptr_tobe=", self.ptr_tobe, "ptr="+self.ptr)
        if options.debug:
            print "write0:tail=%d, ptr_tobe=%d, ptr=%d" % (self.tail, self.ptr_tobe, self.ptr)
        ptr_mutex.release()
        #self.work_queue[self.ptr] = bitcoin.getwork()

        write_queue_mutex.acquire()
        self.get_from_server()
        write_queue_mutex.release()

        ptr_mutex.acquire()
        if (self.ptr + 1) % self.max_num != self.ptr_tobe:
            self.work_queue[self.ptr] = self.work_queue[self.ptr_tobe]
        else:
            self.ptr = self.ptr_tobe
        if options.debug:
            print "write1:tail=%d, ptr_tobe=%d, ptr=%d" % (self.tail, self.ptr_tobe, self.ptr)
        #print("write1:tail="+self.tail+"ptr_tobe="+self.ptr_tobe+"ptr="+self.ptr)
        ptr_mutex.release()

        #print("1update work queue")

    def read_work_queue(self):
        ptr_mutex.acquire()
        #print("read from queue")
        if options.debug:
            print "read0:tail=%d, ptr=%d" % (self.tail, self.ptr)
        if self.ptr == self.tail:
            ptr_mutex.release()
            #print("Queue is empty")
            #print("1read from queue")
            write_queue_mutex.acquire()
            ptr_mutex.acquire()
            if self.ptr == self.tail:
                print("reader get queue")
                self.ptr_tobe = (self.ptr + 1) % self.max_num
                self.get_from_server()
                self.ptr = self.ptr_tobe
            write_queue_mutex.release()

        self.work = self.work_queue[self.ptr]

        if self.ptr == 0:
            self.ptr = self.max_num - 1
        else:
            self.ptr = self.ptr - 1
        #print("read0:tail="+self.tail+"ptr="+self.ptr)
        if options.debug:
            print "read1:tail=%d, ptr=%d" % (self.tail, self.ptr)

        ptr_mutex.release()

        return self.work


class GetWorkQueue(Thread):
    def __init__(self):
        Thread.__init__(self)
        self.daemon = True
        self.delay = (options.askrate >> 1) + 1
    def run(self):
        while True:
            if options.debug:
                print("GetWorkQueue thread")
            wq.write_work_queue()
            #if (self.ptr + 1) % self.max_num == self.tail:
            if wq.is_full():
                sleep(4)
                if options.debug:
                    print("queue is full, slow down request")
            else:
                if options.debug:
                    print("****\nfill the work queue at speed\n****")
            #else:
            #    sleep(2)


class Submitter(Thread):
    def __init__(self, block, nonce):
        Thread.__init__(self)

        self.block = block
        self.nonce = nonce

    def run(self):
        # This thread will be created upon every submit, as they may
        # come in sooner than the submits finish.

        print("Block found on " + ctime() + "\n")

        if stride > 0:
            n = self.nonce.encode('hex')
            print(n + " % " + str(stride) + " = " + str(int(n, 16) % stride))
        elif options.debug:
            print(self.nonce.encode('hex'))

        hrnonce = self.nonce[::-1].encode('hex')

        data = self.block[:152] + hrnonce + self.block[160:]

        try:
            result = bitcoin.getwork(data)
            print("Upstream result: " + str(result))
            print(self.nonce.encode('hex'))
        except:
            print("RPC send error")
            print(self.nonce.encode('hex'))
            # a sensible boolean for stats
            result = False

        results_queue.put(result)

class Display_stats(Thread):
    def __init__(self):
        Thread.__init__(self)

        self.count = [0, 0]
        self.starttime = time()
        self.daemon = True

        print("Miner started on " + ctime())

    def run(self):
        while True:
            result = results_queue.get()

            if result:
                self.count[0] += 1
            else:
                self.count[1] += 1

            print(stats(self.count, self.starttime))

            results_queue.task_done()

parser = OptionParser()

# type="int" keeps -a usable from the command line; optparse would
# otherwise hand back a string and askrate>>1 / golden.wait() would break
parser.add_option("-a", "--askrate", dest="askrate", type="int", default=8, help="Seconds between getwork requests")

parser.add_option("-d", "--debug", dest="debug", default=False, action="store_true", help="Show each nonce result in hex")

parser.add_option("-m", "--miners", dest="miners", default=0, help="Show the nonce result remainder mod MINERS, to identify the node in a cluster")

parser.add_option("-u", "--url", dest="url", default="http://test_fpga_btc@hotmail.com:lzhjxntswc@pit.deepbit.net:8332/", help="URL for bitcoind or mining pool, typically http://user:password@host:8332/")

parser.add_option("-s", "--serial", dest="serial_port", default="com3", help="Serial port, e.g. /dev/ttyS0 on unix or COM1 in Windows")

(options, args) = parser.parse_args()

stride = int(options.miners)

golden = Event()

ptr_mutex = Lock()
write_queue_mutex = Lock()

bitcoin = ServiceProxy(options.url)

results_queue = Queue()

ser = Serial(options.serial_port, 115200, timeout=options.askrate)

wq = WorkQueue(5)

reader = Reader()
writer = Writer()
get_work_queue = GetWorkQueue()
disp = Display_stats()

get_work_queue.start()
reader.start()
writer.start()
disp.start()

try:
    while True:
        # Threads are generally hard to interrupt. So they are left
        # running as daemons, and we do something simple here that can
        # be easily terminated to bring down the entire script.
        sleep(10000)
except KeyboardInterrupt:
    print("Terminated")
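
[Editor's note] The slice in Submitter above is easy to misread: getwork's
"data" field is an 80-byte block header padded to 128 bytes and hex-encoded
as 256 characters, and the 4-byte nonce sits at byte offset 76, i.e. hex
characters 152-160, which is exactly the span the code splices. A minimal
sketch of that splice (Python 2), using the debug nonce 063c5e01 that the
Writer comment mentions; the all-zero block is just a stand-in:

block = "00" * 128                      # hex-encoded work data, 256 chars
nonce = "\x01\x5e\x3c\x06"              # raw bytes as read from the serial port
hrnonce = nonce[::-1].encode('hex')     # byte-reverse, then hex: "063c5e01"
data = block[:152] + hrnonce + block[160:]
assert data[152:160] == "063c5e01" and len(data) == len(block)
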
22  icarus-miner/data/root/scripts/icarus_monitor.sh  Executable file
@@ -0,0 +1,22 @@
#!/bin/sh

API_KEY=http://deepbit.net/api/4edf2d91069172fdae000000_DE38384EE2
WORKER=http://xiangfu.z@gmail.com_1:1234@pit.deepbit.net:8332/

################################################

SCRIPT_PATH=`pwd`

${SCRIPT_PATH}/icarus_undermanager.py -a ${API_KEY} > ${SCRIPT_PATH}/u.log 2>&1

TRUE_COUNT=`grep -c "\"alive\": true" ${SCRIPT_PATH}/u.log`
HASHRATE=`grep -c "\"hashrate\": 0," ${SCRIPT_PATH}/u.log`

if [ "${TRUE_COUNT}" = "0" ] || [ "${HASHRATE}" = "1" ]; then
    echo `date` >> ${SCRIPT_PATH}/restart.log

    ps ax | grep "python.*miner.py" | grep -v grep | sed 's/^ *//' | cut -d ' ' -f 1 | xargs kill -15

    ICARUS_MINING_PATH="../queue_ver"
    (cd ${ICARUS_MINING_PATH} && ./miner.py -u ${WORKER} -s /dev/ttyUSB0 > /dev/null 2>&1 &)
fi
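
[Editor's note] The health check above is two greps over the JSON dump that
icarus_undermanager.py writes: the worker must report '"alive": true' at
least once, and must not report a hashrate of exactly 0. The same decision
expressed in Python, faithful to the grep patterns rather than to any
assumed JSON structure (an illustrative sketch, not part of the package):

def miner_is_healthy(log_path):
    text = open(log_path).read()
    alive = text.count('"alive": true')   # TRUE_COUNT in the shell script
    zero_rate = '"hashrate": 0,' in text  # HASHRATE check in the shell script
    return alive > 0 and not zero_rate
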
28  icarus-miner/data/root/scripts/icarus_undermanager.py  Executable file
@@ -0,0 +1,28 @@
#!/usr/bin/env python

import json, urllib
from optparse import OptionParser

class Deepbit(object):
    @staticmethod
    def get_stats(url):
        try:
            result = json.load(urllib.urlopen(url))
        except:
            # An error occurred; raise an exception
            raise NameError('Could not get the data, sorry. Maybe a non-functional internet connection or wrong API key?')
        return result

try:
    parser = OptionParser()
    parser.add_option("-a",
                      "--api-key",
                      dest="api",
                      default="http://deepbit.net/api/4edf2d91069172fdae000000_DE38384EE2",
                      help="JSON API key")

    (options, args) = parser.parse_args()

    print json.dumps(Deepbit.get_stats(options.api), indent=2)
except Exception as e:
    print e
8  icarus-miner/data/root/scripts/restart.sh  Executable file
@@ -0,0 +1,8 @@
#!/bin/sh

WORKER=http://xiangfu.z@gmail.com_1:1234@pit.deepbit.net:8332/

ps ax | grep "python.*miner.py" | grep -v grep | sed 's/^ *//' | cut -d ' ' -f 1 | xargs kill -15

ICARUS_MINING_PATH="../queue_ver"
(cd ${ICARUS_MINING_PATH} && ./miner.py -u ${WORKER} -s /dev/ttyUSB0 > /dev/null 2>&1 &)
1  icarus-miner/data/usr/bin/python  Symbolic link
@@ -0,0 +1 @@
python2.6

BIN  icarus-miner/data/usr/bin/python2.6  Executable file
Binary file not shown.
244  icarus-miner/data/usr/lib/python2.6/Queue.py  Normal file
@@ -0,0 +1,244 @@
"""A multi-producer, multi-consumer queue."""

from time import time as _time
from collections import deque
import heapq

__all__ = ['Empty', 'Full', 'Queue', 'PriorityQueue', 'LifoQueue']

class Empty(Exception):
    "Exception raised by Queue.get(block=0)/get_nowait()."
    pass

class Full(Exception):
    "Exception raised by Queue.put(block=0)/put_nowait()."
    pass

class Queue:
    """Create a queue object with a given maximum size.

    If maxsize is <= 0, the queue size is infinite.
    """
    def __init__(self, maxsize=0):
        try:
            import threading
        except ImportError:
            import dummy_threading as threading
        self.maxsize = maxsize
        self._init(maxsize)
        # mutex must be held whenever the queue is mutating. All methods
        # that acquire mutex must release it before returning. mutex
        # is shared between the three conditions, so acquiring and
        # releasing the conditions also acquires and releases mutex.
        self.mutex = threading.Lock()
        # Notify not_empty whenever an item is added to the queue; a
        # thread waiting to get is notified then.
        self.not_empty = threading.Condition(self.mutex)
        # Notify not_full whenever an item is removed from the queue;
        # a thread waiting to put is notified then.
        self.not_full = threading.Condition(self.mutex)
        # Notify all_tasks_done whenever the number of unfinished tasks
        # drops to zero; thread waiting to join() is notified to resume
        self.all_tasks_done = threading.Condition(self.mutex)
        self.unfinished_tasks = 0

    def task_done(self):
        """Indicate that a formerly enqueued task is complete.

        Used by Queue consumer threads. For each get() used to fetch a task,
        a subsequent call to task_done() tells the queue that the processing
        on the task is complete.

        If a join() is currently blocking, it will resume when all items
        have been processed (meaning that a task_done() call was received
        for every item that had been put() into the queue).

        Raises a ValueError if called more times than there were items
        placed in the queue.
        """
        self.all_tasks_done.acquire()
        try:
            unfinished = self.unfinished_tasks - 1
            if unfinished <= 0:
                if unfinished < 0:
                    raise ValueError('task_done() called too many times')
                self.all_tasks_done.notify_all()
            self.unfinished_tasks = unfinished
        finally:
            self.all_tasks_done.release()

    def join(self):
        """Blocks until all items in the Queue have been gotten and processed.

        The count of unfinished tasks goes up whenever an item is added to the
        queue. The count goes down whenever a consumer thread calls task_done()
        to indicate the item was retrieved and all work on it is complete.

        When the count of unfinished tasks drops to zero, join() unblocks.
        """
        self.all_tasks_done.acquire()
        try:
            while self.unfinished_tasks:
                self.all_tasks_done.wait()
        finally:
            self.all_tasks_done.release()

    def qsize(self):
        """Return the approximate size of the queue (not reliable!)."""
        self.mutex.acquire()
        n = self._qsize()
        self.mutex.release()
        return n

    def empty(self):
        """Return True if the queue is empty, False otherwise (not reliable!)."""
        self.mutex.acquire()
        n = not self._qsize()
        self.mutex.release()
        return n

    def full(self):
        """Return True if the queue is full, False otherwise (not reliable!)."""
        self.mutex.acquire()
        n = 0 < self.maxsize == self._qsize()
        self.mutex.release()
        return n

    def put(self, item, block=True, timeout=None):
        """Put an item into the queue.

        If optional args 'block' is true and 'timeout' is None (the default),
        block if necessary until a free slot is available. If 'timeout' is
        a positive number, it blocks at most 'timeout' seconds and raises
        the Full exception if no free slot was available within that time.
        Otherwise ('block' is false), put an item on the queue if a free slot
        is immediately available, else raise the Full exception ('timeout'
        is ignored in that case).
        """
        self.not_full.acquire()
        try:
            if self.maxsize > 0:
                if not block:
                    if self._qsize() == self.maxsize:
                        raise Full
                elif timeout is None:
                    while self._qsize() == self.maxsize:
                        self.not_full.wait()
                elif timeout < 0:
                    raise ValueError("'timeout' must be a positive number")
                else:
                    endtime = _time() + timeout
                    while self._qsize() == self.maxsize:
                        remaining = endtime - _time()
                        if remaining <= 0.0:
                            raise Full
                        self.not_full.wait(remaining)
            self._put(item)
            self.unfinished_tasks += 1
            self.not_empty.notify()
        finally:
            self.not_full.release()

    def put_nowait(self, item):
        """Put an item into the queue without blocking.

        Only enqueue the item if a free slot is immediately available.
        Otherwise raise the Full exception.
        """
        return self.put(item, False)

    def get(self, block=True, timeout=None):
        """Remove and return an item from the queue.

        If optional args 'block' is true and 'timeout' is None (the default),
        block if necessary until an item is available. If 'timeout' is
        a positive number, it blocks at most 'timeout' seconds and raises
        the Empty exception if no item was available within that time.
        Otherwise ('block' is false), return an item if one is immediately
        available, else raise the Empty exception ('timeout' is ignored
        in that case).
        """
        self.not_empty.acquire()
        try:
            if not block:
                if not self._qsize():
                    raise Empty
            elif timeout is None:
                while not self._qsize():
                    self.not_empty.wait()
            elif timeout < 0:
                raise ValueError("'timeout' must be a positive number")
            else:
                endtime = _time() + timeout
                while not self._qsize():
                    remaining = endtime - _time()
                    if remaining <= 0.0:
                        raise Empty
                    self.not_empty.wait(remaining)
            item = self._get()
            self.not_full.notify()
            return item
        finally:
            self.not_empty.release()

    def get_nowait(self):
        """Remove and return an item from the queue without blocking.

        Only get an item if one is immediately available. Otherwise
        raise the Empty exception.
        """
        return self.get(False)

    # Override these methods to implement other queue organizations
    # (e.g. stack or priority queue).
    # These will only be called with appropriate locks held

    # Initialize the queue representation
    def _init(self, maxsize):
        self.queue = deque()

    def _qsize(self, len=len):
        return len(self.queue)

    # Put a new item in the queue
    def _put(self, item):
        self.queue.append(item)

    # Get an item from the queue
    def _get(self):
        return self.queue.popleft()


class PriorityQueue(Queue):
    '''Variant of Queue that retrieves open entries in priority order (lowest first).

    Entries are typically tuples of the form:  (priority number, data).
    '''

    def _init(self, maxsize):
        self.queue = []

    def _qsize(self, len=len):
        return len(self.queue)

    def _put(self, item, heappush=heapq.heappush):
        heappush(self.queue, item)

    def _get(self, heappop=heapq.heappop):
        return heappop(self.queue)


class LifoQueue(Queue):
    '''Variant of Queue that retrieves most recently added entries first.'''

    def _init(self, maxsize):
        self.queue = []

    def _qsize(self, len=len):
        return len(self.queue)

    def _put(self, item):
        self.queue.append(item)

    def _get(self):
        return self.queue.pop()

BIN  icarus-miner/data/usr/lib/python2.6/Queue.pyc  Normal file
Binary file not shown.
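
[Editor's note] miner.py above uses this bundled Queue for its
results_queue: Submitter threads put() one boolean per share and
Display_stats get()s them, calling task_done() after each. A minimal
sketch of that producer/consumer pattern, assuming nothing beyond the
module itself (Python 2):

from Queue import Queue
from threading import Thread

q = Queue()

def consumer():
    while True:
        item = q.get()          # blocks until a result arrives
        print "got", item
        q.task_done()           # lets q.join() account for this item

t = Thread(target=consumer)
t.daemon = True
t.start()
for result in (True, False, True):
    q.put(result)
q.join()                        # returns once every put() has been task_done()
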
323  icarus-miner/data/usr/lib/python2.6/StringIO.py  Normal file
@@ -0,0 +1,323 @@
r"""File-like objects that read from or write to a string buffer.

This implements (nearly) all stdio methods.

f = StringIO()      # ready for writing
f = StringIO(buf)   # ready for reading
f.close()           # explicitly release resources held
flag = f.isatty()   # always false
pos = f.tell()      # get current position
f.seek(pos)         # set current position
f.seek(pos, mode)   # mode 0: absolute; 1: relative; 2: relative to EOF
buf = f.read()      # read until EOF
buf = f.read(n)     # read up to n bytes
buf = f.readline()  # read until end of line ('\n') or EOF
list = f.readlines()# list of f.readline() results until EOF
f.truncate([size])  # truncate file to at most size (default: current pos)
f.write(buf)        # write at current position
f.writelines(list)  # for line in list: f.write(line)
f.getvalue()        # return whole file's contents as a string

Notes:
- Using a real file is often faster (but less convenient).
- There's also a much faster implementation in C, called cStringIO, but
  it's not subclassable.
- fileno() is left unimplemented so that code which uses it triggers
  an exception early.
- Seeking far beyond EOF and then writing will insert real null
  bytes that occupy space in the buffer.
- There's a simple test set (see end of this file).
"""
try:
    from errno import EINVAL
except ImportError:
    EINVAL = 22

__all__ = ["StringIO"]

def _complain_ifclosed(closed):
    if closed:
        raise ValueError, "I/O operation on closed file"

class StringIO:
    """class StringIO([buffer])

    When a StringIO object is created, it can be initialized to an existing
    string by passing the string to the constructor. If no string is given,
    the StringIO will start empty.

    The StringIO object can accept either Unicode or 8-bit strings, but
    mixing the two may take some care. If both are used, 8-bit strings that
    cannot be interpreted as 7-bit ASCII (that use the 8th bit) will cause
    a UnicodeError to be raised when getvalue() is called.
    """
    def __init__(self, buf = ''):
        # Force self.buf to be a string or unicode
        if not isinstance(buf, basestring):
            buf = str(buf)
        self.buf = buf
        self.len = len(buf)
        self.buflist = []
        self.pos = 0
        self.closed = False
        self.softspace = 0

    def __iter__(self):
        return self

    def next(self):
        """A file object is its own iterator, for example iter(f) returns f
        (unless f is closed). When a file is used as an iterator, typically
        in a for loop (for example, for line in f: print line), the next()
        method is called repeatedly. This method returns the next input line,
        or raises StopIteration when EOF is hit.
        """
        _complain_ifclosed(self.closed)
        r = self.readline()
        if not r:
            raise StopIteration
        return r

    def close(self):
        """Free the memory buffer.
        """
        if not self.closed:
            self.closed = True
            del self.buf, self.pos

    def isatty(self):
        """Returns False because StringIO objects are not connected to a
        tty-like device.
        """
        _complain_ifclosed(self.closed)
        return False

    def seek(self, pos, mode = 0):
        """Set the file's current position.

        The mode argument is optional and defaults to 0 (absolute file
        positioning); other values are 1 (seek relative to the current
        position) and 2 (seek relative to the file's end).

        There is no return value.
        """
        _complain_ifclosed(self.closed)
        if self.buflist:
            self.buf += ''.join(self.buflist)
            self.buflist = []
        if mode == 1:
            pos += self.pos
        elif mode == 2:
            pos += self.len
        self.pos = max(0, pos)

    def tell(self):
        """Return the file's current position."""
        _complain_ifclosed(self.closed)
        return self.pos

    def read(self, n = -1):
        """Read at most size bytes from the file
        (less if the read hits EOF before obtaining size bytes).

        If the size argument is negative or omitted, read all data until EOF
        is reached. The bytes are returned as a string object. An empty
        string is returned when EOF is encountered immediately.
        """
        _complain_ifclosed(self.closed)
        if self.buflist:
            self.buf += ''.join(self.buflist)
            self.buflist = []
        if n < 0:
            newpos = self.len
        else:
            newpos = min(self.pos+n, self.len)
        r = self.buf[self.pos:newpos]
        self.pos = newpos
        return r

    def readline(self, length=None):
        r"""Read one entire line from the file.

        A trailing newline character is kept in the string (but may be absent
        when a file ends with an incomplete line). If the size argument is
        present and non-negative, it is a maximum byte count (including the
        trailing newline) and an incomplete line may be returned.

        An empty string is returned only when EOF is encountered immediately.

        Note: Unlike stdio's fgets(), the returned string contains null
        characters ('\0') if they occurred in the input.
        """
        _complain_ifclosed(self.closed)
        if self.buflist:
            self.buf += ''.join(self.buflist)
            self.buflist = []
        i = self.buf.find('\n', self.pos)
        if i < 0:
            newpos = self.len
        else:
            newpos = i+1
        if length is not None:
            if self.pos + length < newpos:
                newpos = self.pos + length
        r = self.buf[self.pos:newpos]
        self.pos = newpos
        return r

    def readlines(self, sizehint = 0):
        """Read until EOF using readline() and return a list containing the
        lines thus read.

        If the optional sizehint argument is present, instead of reading up
        to EOF, whole lines totalling approximately sizehint bytes (or more
        to accommodate a final whole line).
        """
        total = 0
        lines = []
        line = self.readline()
        while line:
            lines.append(line)
            total += len(line)
            if 0 < sizehint <= total:
                break
            line = self.readline()
        return lines

    def truncate(self, size=None):
        """Truncate the file's size.

        If the optional size argument is present, the file is truncated to
        (at most) that size. The size defaults to the current position.
        The current file position is not changed unless the position
        is beyond the new file size.

        If the specified size exceeds the file's current size, the
        file remains unchanged.
        """
        _complain_ifclosed(self.closed)
        if size is None:
            size = self.pos
        elif size < 0:
            raise IOError(EINVAL, "Negative size not allowed")
        elif size < self.pos:
            self.pos = size
        self.buf = self.getvalue()[:size]
        self.len = size

    def write(self, s):
        """Write a string to the file.

        There is no return value.
        """
        _complain_ifclosed(self.closed)
        if not s: return
        # Force s to be a string or unicode
        if not isinstance(s, basestring):
            s = str(s)
        spos = self.pos
        slen = self.len
        if spos == slen:
            self.buflist.append(s)
            self.len = self.pos = spos + len(s)
            return
        if spos > slen:
            self.buflist.append('\0'*(spos - slen))
            slen = spos
        newpos = spos + len(s)
        if spos < slen:
            if self.buflist:
                self.buf += ''.join(self.buflist)
            self.buflist = [self.buf[:spos], s, self.buf[newpos:]]
            self.buf = ''
            if newpos > slen:
                slen = newpos
        else:
            self.buflist.append(s)
            slen = newpos
        self.len = slen
        self.pos = newpos

    def writelines(self, iterable):
        """Write a sequence of strings to the file. The sequence can be any
        iterable object producing strings, typically a list of strings. There
        is no return value.

        (The name is intended to match readlines(); writelines() does not add
        line separators.)
        """
        write = self.write
        for line in iterable:
            write(line)

    def flush(self):
        """Flush the internal buffer
        """
        _complain_ifclosed(self.closed)

    def getvalue(self):
        """
        Retrieve the entire contents of the "file" at any time before
        the StringIO object's close() method is called.

        The StringIO object can accept either Unicode or 8-bit strings,
        but mixing the two may take some care. If both are used, 8-bit
        strings that cannot be interpreted as 7-bit ASCII (that use the
        8th bit) will cause a UnicodeError to be raised when getvalue()
        is called.
        """
        if self.buflist:
            self.buf += ''.join(self.buflist)
            self.buflist = []
        return self.buf


# A little test suite

def test():
    import sys
    if sys.argv[1:]:
        file = sys.argv[1]
    else:
        file = '/etc/passwd'
    lines = open(file, 'r').readlines()
    text = open(file, 'r').read()
    f = StringIO()
    for line in lines[:-2]:
        f.write(line)
    f.writelines(lines[-2:])
    if f.getvalue() != text:
        raise RuntimeError, 'write failed'
    length = f.tell()
    print 'File length =', length
    f.seek(len(lines[0]))
    f.write(lines[1])
    f.seek(0)
    print 'First line =', repr(f.readline())
    print 'Position =', f.tell()
    line = f.readline()
    print 'Second line =', repr(line)
    f.seek(-len(line), 1)
    line2 = f.read(len(line))
    if line != line2:
        raise RuntimeError, 'bad result after seek back'
    f.seek(len(line2), 1)
    list = f.readlines()
    line = list[-1]
    f.seek(f.tell() - len(line))
    line2 = f.read()
    if line != line2:
        raise RuntimeError, 'bad result after seek back from EOF'
    print 'Read', len(list), 'more lines'
    print 'File length =', f.tell()
    if f.tell() != length:
        raise RuntimeError, 'bad length'
    f.truncate(length/2)
    f.seek(0, 2)
    print 'Truncated length =', f.tell()
    if f.tell() != length/2:
        raise RuntimeError, 'truncate did not adjust length'
    f.close()

if __name__ == '__main__':
    test()

BIN  icarus-miner/data/usr/lib/python2.6/StringIO.pyc  Normal file
Binary file not shown.
179  icarus-miner/data/usr/lib/python2.6/UserDict.py  Normal file
@@ -0,0 +1,179 @@
"""A more or less complete user-defined wrapper around dictionary objects."""

class UserDict:
    def __init__(self, dict=None, **kwargs):
        self.data = {}
        if dict is not None:
            self.update(dict)
        if len(kwargs):
            self.update(kwargs)
    def __repr__(self): return repr(self.data)
    def __cmp__(self, dict):
        if isinstance(dict, UserDict):
            return cmp(self.data, dict.data)
        else:
            return cmp(self.data, dict)
    def __len__(self): return len(self.data)
    def __getitem__(self, key):
        if key in self.data:
            return self.data[key]
        if hasattr(self.__class__, "__missing__"):
            return self.__class__.__missing__(self, key)
        raise KeyError(key)
    def __setitem__(self, key, item): self.data[key] = item
    def __delitem__(self, key): del self.data[key]
    def clear(self): self.data.clear()
    def copy(self):
        if self.__class__ is UserDict:
            return UserDict(self.data.copy())
        import copy
        data = self.data
        try:
            self.data = {}
            c = copy.copy(self)
        finally:
            self.data = data
        c.update(self)
        return c
    def keys(self): return self.data.keys()
    def items(self): return self.data.items()
    def iteritems(self): return self.data.iteritems()
    def iterkeys(self): return self.data.iterkeys()
    def itervalues(self): return self.data.itervalues()
    def values(self): return self.data.values()
    def has_key(self, key): return key in self.data
    def update(self, dict=None, **kwargs):
        if dict is None:
            pass
        elif isinstance(dict, UserDict):
            self.data.update(dict.data)
        elif isinstance(dict, type({})) or not hasattr(dict, 'items'):
            self.data.update(dict)
        else:
            for k, v in dict.items():
                self[k] = v
        if len(kwargs):
            self.data.update(kwargs)
    def get(self, key, failobj=None):
        if key not in self:
            return failobj
        return self[key]
    def setdefault(self, key, failobj=None):
        if key not in self:
            self[key] = failobj
        return self[key]
    def pop(self, key, *args):
        return self.data.pop(key, *args)
    def popitem(self):
        return self.data.popitem()
    def __contains__(self, key):
        return key in self.data
    @classmethod
    def fromkeys(cls, iterable, value=None):
        d = cls()
        for key in iterable:
            d[key] = value
        return d

class IterableUserDict(UserDict):
    def __iter__(self):
        return iter(self.data)

import _abcoll
_abcoll.MutableMapping.register(IterableUserDict)


class DictMixin:
    # Mixin defining all dictionary methods for classes that already have
    # a minimum dictionary interface including getitem, setitem, delitem,
    # and keys. Without knowledge of the subclass constructor, the mixin
    # does not define __init__() or copy(). In addition to the four base
    # methods, progressively more efficiency comes with defining
    # __contains__(), __iter__(), and iteritems().

    # second level definitions support higher levels
    def __iter__(self):
        for k in self.keys():
            yield k
    def has_key(self, key):
        try:
            value = self[key]
        except KeyError:
            return False
        return True
    def __contains__(self, key):
        return self.has_key(key)

    # third level takes advantage of second level definitions
    def iteritems(self):
        for k in self:
            yield (k, self[k])
    def iterkeys(self):
        return self.__iter__()

    # fourth level uses definitions from lower levels
    def itervalues(self):
        for _, v in self.iteritems():
            yield v
    def values(self):
        return [v for _, v in self.iteritems()]
    def items(self):
        return list(self.iteritems())
    def clear(self):
        for key in self.keys():
            del self[key]
    def setdefault(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            self[key] = default
        return default
    def pop(self, key, *args):
        if len(args) > 1:
            raise TypeError, "pop expected at most 2 arguments, got "\
                              + repr(1 + len(args))
        try:
            value = self[key]
        except KeyError:
            if args:
                return args[0]
            raise
        del self[key]
        return value
    def popitem(self):
        try:
            k, v = self.iteritems().next()
        except StopIteration:
            raise KeyError, 'container is empty'
        del self[k]
        return (k, v)
    def update(self, other=None, **kwargs):
        # Make progressively weaker assumptions about "other"
        if other is None:
            pass
        elif hasattr(other, 'iteritems'):  # iteritems saves memory and lookups
            for k, v in other.iteritems():
                self[k] = v
        elif hasattr(other, 'keys'):
            for k in other.keys():
                self[k] = other[k]
        else:
            for k, v in other:
                self[k] = v
        if kwargs:
            self.update(kwargs)
    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default
    def __repr__(self):
        return repr(dict(self.iteritems()))
    def __cmp__(self, other):
        if other is None:
            return 1
        if isinstance(other, DictMixin):
            other = dict(other.iteritems())
        return cmp(dict(self.iteritems()), other)
    def __len__(self):
        return len(self.keys())

BIN  icarus-miner/data/usr/lib/python2.6/UserDict.pyc  Normal file
Binary file not shown.
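
[Editor's note] The DictMixin contract above is easy to miss in the wall of
methods: supply __getitem__, __setitem__, __delitem__ and keys(), and the
mixin derives the rest of the dict interface. A minimal sketch (the class
name and attribute-backed storage are illustrative only):

from UserDict import DictMixin

class AttrDict(DictMixin):
    """Dict backed by instance attributes; only the four base methods."""
    def __getitem__(self, key):
        try:
            return getattr(self, key)
        except AttributeError:
            raise KeyError(key)
    def __setitem__(self, key, value): setattr(self, key, value)
    def __delitem__(self, key): delattr(self, key)
    def keys(self): return self.__dict__.keys()

d = AttrDict()
d['x'] = 1
print d.get('x'), d.items(), 'x' in d   # -> 1 [('x', 1)] True
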
157  icarus-miner/data/usr/lib/python2.6/__init__.py  Normal file
@@ -0,0 +1,157 @@
""" Standard "encodings" Package

    Standard Python encoding modules are stored in this package
    directory.

    Codec modules must have names corresponding to normalized encoding
    names as defined in the normalize_encoding() function below, e.g.
    'utf-8' must be implemented by the module 'utf_8.py'.

    Each codec module must export the following interface:

    * getregentry() -> codecs.CodecInfo object
      The getregentry() API must return a CodecInfo object with encoder, decoder,
      incrementalencoder, incrementaldecoder, streamwriter and streamreader
      attributes which adhere to the Python Codec Interface Standard.

    In addition, a module may optionally also define the following
    APIs which are then used by the package's codec search function:

    * getaliases() -> sequence of encoding name strings to use as aliases

    Alias names returned by getaliases() must be normalized encoding
    names as defined by normalize_encoding().

    Written by Marc-Andre Lemburg (mal@lemburg.com).

    (c) Copyright CNRI, All Rights Reserved. NO WARRANTY.

"""#"

import codecs
from encodings import aliases
import __builtin__

_cache = {}
_unknown = '--unknown--'
_import_tail = ['*']
_norm_encoding_map = ('                                              . '
                      '0123456789       ABCDEFGHIJKLMNOPQRSTUVWXYZ     '
                      ' abcdefghijklmnopqrstuvwxyz                     '
                      '                                                '
                      '                                                '
                      '                ')
_aliases = aliases.aliases

class CodecRegistryError(LookupError, SystemError):
    pass

def normalize_encoding(encoding):

    """ Normalize an encoding name.

        Normalization works as follows: all non-alphanumeric
        characters except the dot used for Python package names are
        collapsed and replaced with a single underscore, e.g. ' -;#'
        becomes '_'. Leading and trailing underscores are removed.

        Note that encoding names should be ASCII only; if they do use
        non-ASCII characters, these must be Latin-1 compatible.

    """
    # Make sure we have an 8-bit string, because .translate() works
    # differently for Unicode strings.
    if hasattr(__builtin__, "unicode") and isinstance(encoding, unicode):
        # Note that .encode('latin-1') does *not* use the codec
        # registry, so this call doesn't recurse. (See unicodeobject.c
        # PyUnicode_AsEncodedString() for details)
        encoding = encoding.encode('latin-1')
    return '_'.join(encoding.translate(_norm_encoding_map).split())

def search_function(encoding):

    # Cache lookup
    entry = _cache.get(encoding, _unknown)
    if entry is not _unknown:
        return entry

    # Import the module:
    #
    # First try to find an alias for the normalized encoding
    # name and lookup the module using the aliased name, then try to
    # lookup the module using the standard import scheme, i.e. first
    # try in the encodings package, then at top-level.
    #
    norm_encoding = normalize_encoding(encoding)
    aliased_encoding = _aliases.get(norm_encoding) or \
                       _aliases.get(norm_encoding.replace('.', '_'))
    if aliased_encoding is not None:
        modnames = [aliased_encoding,
                    norm_encoding]
    else:
        modnames = [norm_encoding]
    for modname in modnames:
        if not modname or '.' in modname:
            continue
        try:
            # Import is absolute to prevent the possibly malicious import of a
            # module with side-effects that is not in the 'encodings' package.
            mod = __import__('encodings.' + modname, fromlist=_import_tail,
                             level=0)
        except ImportError:
            pass
        else:
            break
    else:
        mod = None

    try:
        getregentry = mod.getregentry
    except AttributeError:
        # Not a codec module
        mod = None

    if mod is None:
        # Cache misses
        _cache[encoding] = None
        return None

    # Now ask the module for the registry entry
    entry = getregentry()
    if not isinstance(entry, codecs.CodecInfo):
        if not 4 <= len(entry) <= 7:
            raise CodecRegistryError,\
                'module "%s" (%s) failed to register' % \
                (mod.__name__, mod.__file__)
        if not callable(entry[0]) or \
           not callable(entry[1]) or \
           (entry[2] is not None and not callable(entry[2])) or \
           (entry[3] is not None and not callable(entry[3])) or \
           (len(entry) > 4 and entry[4] is not None and not callable(entry[4])) or \
           (len(entry) > 5 and entry[5] is not None and not callable(entry[5])):
            raise CodecRegistryError,\
                'incompatible codecs in module "%s" (%s)' % \
                (mod.__name__, mod.__file__)
        if len(entry)<7 or entry[6] is None:
            entry += (None,)*(6-len(entry)) + (mod.__name__.split(".", 1)[1],)
        entry = codecs.CodecInfo(*entry)

    # Cache the codec registry entry
    _cache[encoding] = entry

    # Register its aliases (without overwriting previously registered
    # aliases)
    try:
        codecaliases = mod.getaliases()
    except AttributeError:
        pass
    else:
        for alias in codecaliases:
            if not _aliases.has_key(alias):
                _aliases[alias] = modname

    # Return the registry entry
    return entry

# Register the search_function in the Python codec registry
codecs.register(search_function)

BIN  icarus-miner/data/usr/lib/python2.6/__init__.pyc  Normal file
Binary file not shown.
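
[Editor's note] The normalization rule in the docstring above is easier to
see with a concrete call; this sketch assumes only the module above, which
Python imports as the standard encodings package:

import codecs

# normalize_encoding() collapses runs of non-alphanumerics to "_", so
# variant spellings all resolve to the utf_8 codec module via
# search_function (the C-level lookup lowercases the name first).
for name in ('utf-8', 'UTF 8', 'utf8'):
    print name, '->', codecs.lookup(name).name   # each prints: ... -> utf-8
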
562  icarus-miner/data/usr/lib/python2.6/_abcoll.py  Normal file
@@ -0,0 +1,562 @@
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Abstract Base Classes (ABCs) for collections, according to PEP 3119.

DON'T USE THIS MODULE DIRECTLY! The classes here should be imported
via collections; they are defined here only to alleviate certain
bootstrapping issues. Unit tests are in test_collections.
"""

from abc import ABCMeta, abstractmethod
import sys

__all__ = ["Hashable", "Iterable", "Iterator",
           "Sized", "Container", "Callable",
           "Set", "MutableSet",
           "Mapping", "MutableMapping",
           "MappingView", "KeysView", "ItemsView", "ValuesView",
           "Sequence", "MutableSequence",
           ]

### ONE-TRICK PONIES ###

class Hashable:
    __metaclass__ = ABCMeta

    @abstractmethod
    def __hash__(self):
        return 0

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Hashable:
            for B in C.__mro__:
                if "__hash__" in B.__dict__:
                    if B.__dict__["__hash__"]:
                        return True
                    break
        return NotImplemented


class Iterable:
    __metaclass__ = ABCMeta

    @abstractmethod
    def __iter__(self):
        while False:
            yield None

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Iterable:
            if any("__iter__" in B.__dict__ for B in C.__mro__):
                return True
        return NotImplemented

Iterable.register(str)


class Iterator(Iterable):

    @abstractmethod
    def next(self):
        raise StopIteration

    def __iter__(self):
        return self

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Iterator:
            if any("next" in B.__dict__ for B in C.__mro__):
                return True
        return NotImplemented


class Sized:
    __metaclass__ = ABCMeta

    @abstractmethod
    def __len__(self):
        return 0

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Sized:
            if any("__len__" in B.__dict__ for B in C.__mro__):
                return True
        return NotImplemented


class Container:
    __metaclass__ = ABCMeta

    @abstractmethod
    def __contains__(self, x):
        return False

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Container:
            if any("__contains__" in B.__dict__ for B in C.__mro__):
                return True
        return NotImplemented


class Callable:
    __metaclass__ = ABCMeta

    @abstractmethod
    def __call__(self, *args, **kwds):
        return False

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Callable:
            if any("__call__" in B.__dict__ for B in C.__mro__):
                return True
        return NotImplemented


### SETS ###


class Set(Sized, Iterable, Container):
    """A set is a finite, iterable container.

    This class provides concrete generic implementations of all
    methods except for __contains__, __iter__ and __len__.

    To override the comparisons (presumably for speed, as the
    semantics are fixed), all you have to do is redefine __le__ and
    then the other operations will automatically follow suit.
    """

    def __le__(self, other):
        if not isinstance(other, Set):
            return NotImplemented
        if len(self) > len(other):
            return False
        for elem in self:
            if elem not in other:
                return False
        return True

    def __lt__(self, other):
        if not isinstance(other, Set):
            return NotImplemented
        return len(self) < len(other) and self.__le__(other)

    def __gt__(self, other):
        if not isinstance(other, Set):
            return NotImplemented
        return other < self

    def __ge__(self, other):
        if not isinstance(other, Set):
            return NotImplemented
        return other <= self

    def __eq__(self, other):
        if not isinstance(other, Set):
            return NotImplemented
        return len(self) == len(other) and self.__le__(other)

    def __ne__(self, other):
        return not (self == other)

    @classmethod
    def _from_iterable(cls, it):
        '''Construct an instance of the class from any iterable input.

        Must override this method if the class constructor signature
        does not accept an iterable for an input.
        '''
        return cls(it)

    def __and__(self, other):
        if not isinstance(other, Iterable):
            return NotImplemented
        return self._from_iterable(value for value in other if value in self)

    def isdisjoint(self, other):
        for value in other:
            if value in self:
                return False
        return True

    def __or__(self, other):
        if not isinstance(other, Iterable):
            return NotImplemented
        chain = (e for s in (self, other) for e in s)
        return self._from_iterable(chain)

    def __sub__(self, other):
        if not isinstance(other, Set):
            if not isinstance(other, Iterable):
                return NotImplemented
            other = self._from_iterable(other)
        return self._from_iterable(value for value in self
                                   if value not in other)

    def __xor__(self, other):
        if not isinstance(other, Set):
            if not isinstance(other, Iterable):
                return NotImplemented
            other = self._from_iterable(other)
        return (self - other) | (other - self)

    # Sets are not hashable by default, but subclasses can change this
    __hash__ = None

    def _hash(self):
        """Compute the hash value of a set.

        Note that we don't define __hash__: not all sets are hashable.
        But if you define a hashable set type, its __hash__ should
        call this function.

        This must be compatible with __eq__.

        All sets ought to compare equal if they contain the same
        elements, regardless of how they are implemented, and
        regardless of the order of the elements; so there's not much
        freedom for __eq__ or __hash__. We match the algorithm used
        by the built-in frozenset type.
        """
        MAX = sys.maxint
        MASK = 2 * MAX + 1
        n = len(self)
        h = 1927868237 * (n + 1)
        h &= MASK
        for x in self:
            hx = hash(x)
            h ^= (hx ^ (hx << 16) ^ 89869747)  * 3644798167
            h &= MASK
        h = h * 69069 + 907133923
        h &= MASK
        if h > MAX:
            h -= MASK + 1
        if h == -1:
            h = 590923713
        return h

Set.register(frozenset)


class MutableSet(Set):

    @abstractmethod
    def add(self, value):
        """Add an element."""
        raise NotImplementedError

    @abstractmethod
    def discard(self, value):
|
||||||
|
"""Remove an element. Do not raise an exception if absent."""
|
||||||
|
raise NotImplementedError
|
||||||
|
|
||||||
|
def remove(self, value):
|
||||||
|
"""Remove an element. If not a member, raise a KeyError."""
|
||||||
|
if value not in self:
|
||||||
|
raise KeyError(value)
|
||||||
|
self.discard(value)
|
||||||
|
|
||||||
|
def pop(self):
|
||||||
|
"""Return the popped value. Raise KeyError if empty."""
|
||||||
|
it = iter(self)
|
||||||
|
try:
|
||||||
|
value = next(it)
|
||||||
|
except StopIteration:
|
||||||
|
raise KeyError
|
||||||
|
self.discard(value)
|
||||||
|
return value
|
||||||
|
|
||||||
|
def clear(self):
|
||||||
|
"""This is slow (creates N new iterators!) but effective."""
|
||||||
|
try:
|
||||||
|
while True:
|
||||||
|
self.pop()
|
||||||
|
except KeyError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
def __ior__(self, it):
|
||||||
|
for value in it:
|
||||||
|
self.add(value)
|
||||||
|
return self
|
||||||
|
|
||||||
|
def __iand__(self, it):
|
||||||
|
for value in (self - it):
|
||||||
|
self.discard(value)
|
||||||
|
return self
|
||||||
|
|
||||||
|
def __ixor__(self, it):
|
||||||
|
if not isinstance(it, Set):
|
||||||
|
it = self._from_iterable(it)
|
||||||
|
for value in it:
|
||||||
|
if value in self:
|
||||||
|
self.discard(value)
|
||||||
|
else:
|
||||||
|
self.add(value)
|
||||||
|
return self
|
||||||
|
|
||||||
|
def __isub__(self, it):
|
||||||
|
for value in it:
|
||||||
|
self.discard(value)
|
||||||
|
return self
|
||||||
|
|
||||||
|
MutableSet.register(set)
|
||||||
|
|
||||||
|
|
||||||
|
### MAPPINGS ###
|
||||||
|
|
||||||
|
|
||||||
|
class Mapping(Sized, Iterable, Container):
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def __getitem__(self, key):
|
||||||
|
raise KeyError
|
||||||
|
|
||||||
|
def get(self, key, default=None):
|
||||||
|
try:
|
||||||
|
return self[key]
|
||||||
|
except KeyError:
|
||||||
|
return default
|
||||||
|
|
||||||
|
def __contains__(self, key):
|
||||||
|
try:
|
||||||
|
self[key]
|
||||||
|
except KeyError:
|
||||||
|
return False
|
||||||
|
else:
|
||||||
|
return True
|
||||||
|
|
||||||
|
def iterkeys(self):
|
||||||
|
return iter(self)
|
||||||
|
|
||||||
|
def itervalues(self):
|
||||||
|
for key in self:
|
||||||
|
yield self[key]
|
||||||
|
|
||||||
|
def iteritems(self):
|
||||||
|
for key in self:
|
||||||
|
yield (key, self[key])
|
||||||
|
|
||||||
|
def keys(self):
|
||||||
|
return list(self)
|
||||||
|
|
||||||
|
def items(self):
|
||||||
|
return [(key, self[key]) for key in self]
|
||||||
|
|
||||||
|
def values(self):
|
||||||
|
return [self[key] for key in self]
|
||||||
|
|
||||||
|
# Mappings are not hashable by default, but subclasses can change this
|
||||||
|
__hash__ = None
|
||||||
|
|
||||||
|
def __eq__(self, other):
|
||||||
|
return isinstance(other, Mapping) and \
|
||||||
|
dict(self.items()) == dict(other.items())
|
||||||
|
|
||||||
|
def __ne__(self, other):
|
||||||
|
return not (self == other)
|
||||||
|
|
||||||
|
class MappingView(Sized):
|
||||||
|
|
||||||
|
def __init__(self, mapping):
|
||||||
|
self._mapping = mapping
|
||||||
|
|
||||||
|
def __len__(self):
|
||||||
|
return len(self._mapping)
|
||||||
|
|
||||||
|
|
||||||
|
class KeysView(MappingView, Set):
|
||||||
|
|
||||||
|
def __contains__(self, key):
|
||||||
|
return key in self._mapping
|
||||||
|
|
||||||
|
def __iter__(self):
|
||||||
|
for key in self._mapping:
|
||||||
|
yield key
|
||||||
|
|
||||||
|
|
||||||
|
class ItemsView(MappingView, Set):
|
||||||
|
|
||||||
|
def __contains__(self, item):
|
||||||
|
key, value = item
|
||||||
|
try:
|
||||||
|
v = self._mapping[key]
|
||||||
|
except KeyError:
|
||||||
|
return False
|
||||||
|
else:
|
||||||
|
return v == value
|
||||||
|
|
||||||
|
def __iter__(self):
|
||||||
|
for key in self._mapping:
|
||||||
|
yield (key, self._mapping[key])
|
||||||
|
|
||||||
|
|
||||||
|
class ValuesView(MappingView):
|
||||||
|
|
||||||
|
def __contains__(self, value):
|
||||||
|
for key in self._mapping:
|
||||||
|
if value == self._mapping[key]:
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
def __iter__(self):
|
||||||
|
for key in self._mapping:
|
||||||
|
yield self._mapping[key]
|
||||||
|
|
||||||
|
|
||||||
|
class MutableMapping(Mapping):
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def __setitem__(self, key, value):
|
||||||
|
raise KeyError
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def __delitem__(self, key):
|
||||||
|
raise KeyError
|
||||||
|
|
||||||
|
__marker = object()
|
||||||
|
|
||||||
|
def pop(self, key, default=__marker):
|
||||||
|
try:
|
||||||
|
value = self[key]
|
||||||
|
except KeyError:
|
||||||
|
if default is self.__marker:
|
||||||
|
raise
|
||||||
|
return default
|
||||||
|
else:
|
||||||
|
del self[key]
|
||||||
|
return value
|
||||||
|
|
||||||
|
def popitem(self):
|
||||||
|
try:
|
||||||
|
key = next(iter(self))
|
||||||
|
except StopIteration:
|
||||||
|
raise KeyError
|
||||||
|
value = self[key]
|
||||||
|
del self[key]
|
||||||
|
return key, value
|
||||||
|
|
||||||
|
def clear(self):
|
||||||
|
try:
|
||||||
|
while True:
|
||||||
|
self.popitem()
|
||||||
|
except KeyError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
def update(self, other=(), **kwds):
|
||||||
|
if isinstance(other, Mapping):
|
||||||
|
for key in other:
|
||||||
|
self[key] = other[key]
|
||||||
|
elif hasattr(other, "keys"):
|
||||||
|
for key in other.keys():
|
||||||
|
self[key] = other[key]
|
||||||
|
else:
|
||||||
|
for key, value in other:
|
||||||
|
self[key] = value
|
||||||
|
for key, value in kwds.items():
|
||||||
|
self[key] = value
|
||||||
|
|
||||||
|
def setdefault(self, key, default=None):
|
||||||
|
try:
|
||||||
|
return self[key]
|
||||||
|
except KeyError:
|
||||||
|
self[key] = default
|
||||||
|
return default
|
||||||
|
|
||||||
|
MutableMapping.register(dict)
|
||||||
|
|
||||||
|
|
||||||
|
### SEQUENCES ###
|
||||||
|
|
||||||
|
|
||||||
|
class Sequence(Sized, Iterable, Container):
|
||||||
|
"""All the operations on a read-only sequence.
|
||||||
|
|
||||||
|
Concrete subclasses must override __new__ or __init__,
|
||||||
|
__getitem__, and __len__.
|
||||||
|
"""
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def __getitem__(self, index):
|
||||||
|
raise IndexError
|
||||||
|
|
||||||
|
def __iter__(self):
|
||||||
|
i = 0
|
||||||
|
try:
|
||||||
|
while True:
|
||||||
|
v = self[i]
|
||||||
|
yield v
|
||||||
|
i += 1
|
||||||
|
except IndexError:
|
||||||
|
return
|
||||||
|
|
||||||
|
def __contains__(self, value):
|
||||||
|
for v in self:
|
||||||
|
if v == value:
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
def __reversed__(self):
|
||||||
|
for i in reversed(range(len(self))):
|
||||||
|
yield self[i]
|
||||||
|
|
||||||
|
def index(self, value):
|
||||||
|
for i, v in enumerate(self):
|
||||||
|
if v == value:
|
||||||
|
return i
|
||||||
|
raise ValueError
|
||||||
|
|
||||||
|
def count(self, value):
|
||||||
|
return sum(1 for v in self if v == value)
|
||||||
|
|
||||||
|
Sequence.register(tuple)
|
||||||
|
Sequence.register(basestring)
|
||||||
|
Sequence.register(buffer)
|
||||||
|
Sequence.register(xrange)
|
||||||
|
|
||||||
|
|
||||||
|
class MutableSequence(Sequence):
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def __setitem__(self, index, value):
|
||||||
|
raise IndexError
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def __delitem__(self, index):
|
||||||
|
raise IndexError
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def insert(self, index, value):
|
||||||
|
raise IndexError
|
||||||
|
|
||||||
|
def append(self, value):
|
||||||
|
self.insert(len(self), value)
|
||||||
|
|
||||||
|
def reverse(self):
|
||||||
|
n = len(self)
|
||||||
|
for i in range(n//2):
|
||||||
|
self[i], self[n-i-1] = self[n-i-1], self[i]
|
||||||
|
|
||||||
|
def extend(self, values):
|
||||||
|
for v in values:
|
||||||
|
self.append(v)
|
||||||
|
|
||||||
|
def pop(self, index=-1):
|
||||||
|
v = self[index]
|
||||||
|
del self[index]
|
||||||
|
return v
|
||||||
|
|
||||||
|
def remove(self, value):
|
||||||
|
del self[self.index(value)]
|
||||||
|
|
||||||
|
def __iadd__(self, values):
|
||||||
|
self.extend(values)
|
||||||
|
return self
|
||||||
|
|
||||||
|
MutableSequence.register(list)
|
BIN
icarus-miner/data/usr/lib/python2.6/_abcoll.pyc
Normal file
Binary file not shown.
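
For reference, a minimal sketch (illustrative only, not part of this package) of how the ABCs above are consumed: implement the five abstract methods of MutableMapping and every mixin method (get, update, setdefault, pop, ...) is inherited for free. The DictBackedMap name is made up for the example.

# Illustrative only -- not shipped with the package.
from collections import MutableMapping

class DictBackedMap(MutableMapping):
    """Supplies only the five abstract methods; get(), update(),
    setdefault(), pop() etc. are inherited from the mixins above."""
    def __init__(self):
        self._data = {}
    def __getitem__(self, key):
        return self._data[key]
    def __setitem__(self, key, value):
        self._data[key] = value
    def __delitem__(self, key):
        del self._data[key]
    def __iter__(self):
        return iter(self._data)
    def __len__(self):
        return len(self._data)

m = DictBackedMap()
m.update(a=1, b=2)          # update() comes from MutableMapping
print m.get('c', 0)         # get() comes from Mapping -> prints 0
print m.setdefault('c', 3)  # -> prints 3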
174
icarus-miner/data/usr/lib/python2.6/abc.py
Normal file
@ -0,0 +1,174 @@
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Abstract Base Classes (ABCs) according to PEP 3119."""


def abstractmethod(funcobj):
    """A decorator indicating abstract methods.

    Requires that the metaclass is ABCMeta or derived from it.  A
    class that has a metaclass derived from ABCMeta cannot be
    instantiated unless all of its abstract methods are overridden.
    The abstract methods can be called using any of the normal
    'super' call mechanisms.

    Usage:

        class C:
            __metaclass__ = ABCMeta
            @abstractmethod
            def my_abstract_method(self, ...):
                ...
    """
    funcobj.__isabstractmethod__ = True
    return funcobj


class abstractproperty(property):
    """A decorator indicating abstract properties.

    Requires that the metaclass is ABCMeta or derived from it.  A
    class that has a metaclass derived from ABCMeta cannot be
    instantiated unless all of its abstract properties are overridden.
    The abstract properties can be called using any of the normal
    'super' call mechanisms.

    Usage:

        class C:
            __metaclass__ = ABCMeta
            @abstractproperty
            def my_abstract_property(self):
                ...

    This defines a read-only property; you can also define a read-write
    abstract property using the 'long' form of property declaration:

        class C:
            __metaclass__ = ABCMeta
            def getx(self): ...
            def setx(self, value): ...
            x = abstractproperty(getx, setx)
    """
    __isabstractmethod__ = True


class ABCMeta(type):

    """Metaclass for defining Abstract Base Classes (ABCs).

    Use this metaclass to create an ABC.  An ABC can be subclassed
    directly, and then acts as a mix-in class.  You can also register
    unrelated concrete classes (even built-in classes) and unrelated
    ABCs as 'virtual subclasses' -- these and their descendants will
    be considered subclasses of the registering ABC by the built-in
    issubclass() function, but the registering ABC won't show up in
    their MRO (Method Resolution Order) nor will method
    implementations defined by the registering ABC be callable (not
    even via super()).

    """

    # A global counter that is incremented each time a class is
    # registered as a virtual subclass of anything.  It forces the
    # negative cache to be cleared before its next use.
    _abc_invalidation_counter = 0

    def __new__(mcls, name, bases, namespace):
        cls = super(ABCMeta, mcls).__new__(mcls, name, bases, namespace)
        # Compute set of abstract method names
        abstracts = set(name
                     for name, value in namespace.items()
                     if getattr(value, "__isabstractmethod__", False))
        for base in bases:
            for name in getattr(base, "__abstractmethods__", set()):
                value = getattr(cls, name, None)
                if getattr(value, "__isabstractmethod__", False):
                    abstracts.add(name)
        cls.__abstractmethods__ = frozenset(abstracts)
        # Set up inheritance registry
        cls._abc_registry = set()
        cls._abc_cache = set()
        cls._abc_negative_cache = set()
        cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
        return cls

    def register(cls, subclass):
        """Register a virtual subclass of an ABC."""
        if not isinstance(subclass, type):
            raise TypeError("Can only register classes")
        if issubclass(subclass, cls):
            return  # Already a subclass
        # Subtle: test for cycles *after* testing for "already a subclass";
        # this means we allow X.register(X) and interpret it as a no-op.
        if issubclass(cls, subclass):
            # This would create a cycle, which is bad for the algorithm below
            raise RuntimeError("Refusing to create an inheritance cycle")
        cls._abc_registry.add(subclass)
        ABCMeta._abc_invalidation_counter += 1  # Invalidate negative cache

    def _dump_registry(cls, file=None):
        """Debug helper to print the ABC registry."""
        print >> file, "Class: %s.%s" % (cls.__module__, cls.__name__)
        print >> file, "Inv.counter: %s" % ABCMeta._abc_invalidation_counter
        for name in sorted(cls.__dict__.keys()):
            if name.startswith("_abc_"):
                value = getattr(cls, name)
                print >> file, "%s: %r" % (name, value)

    def __instancecheck__(cls, instance):
        """Override for isinstance(instance, cls)."""
        # Inline the cache checking when it's simple.
        subclass = getattr(instance, '__class__', None)
        if subclass in cls._abc_cache:
            return True
        subtype = type(instance)
        if subtype is subclass or subclass is None:
            if (cls._abc_negative_cache_version ==
                ABCMeta._abc_invalidation_counter and
                subtype in cls._abc_negative_cache):
                return False
            # Fall back to the subclass check.
            return cls.__subclasscheck__(subtype)
        return (cls.__subclasscheck__(subclass) or
                cls.__subclasscheck__(subtype))

    def __subclasscheck__(cls, subclass):
        """Override for issubclass(subclass, cls)."""
        # Check cache
        if subclass in cls._abc_cache:
            return True
        # Check negative cache; may have to invalidate
        if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter:
            # Invalidate the negative cache
            cls._abc_negative_cache = set()
            cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
        elif subclass in cls._abc_negative_cache:
            return False
        # Check the subclass hook
        ok = cls.__subclasshook__(subclass)
        if ok is not NotImplemented:
            assert isinstance(ok, bool)
            if ok:
                cls._abc_cache.add(subclass)
            else:
                cls._abc_negative_cache.add(subclass)
            return ok
        # Check if it's a direct subclass
        if cls in getattr(subclass, '__mro__', ()):
            cls._abc_cache.add(subclass)
            return True
        # Check if it's a subclass of a registered class (recursive)
        for rcls in cls._abc_registry:
            if issubclass(subclass, rcls):
                cls._abc_cache.add(subclass)
                return True
        # Check if it's a subclass of a subclass (recursive)
        for scls in cls.__subclasses__():
            if issubclass(subclass, scls):
                cls._abc_cache.add(subclass)
                return True
        # No dice; update negative cache
        cls._abc_negative_cache.add(subclass)
        return False
BIN
icarus-miner/data/usr/lib/python2.6/abc.pyc
Normal file
Binary file not shown.
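
For reference, a minimal sketch (illustrative only) of the machinery in abc.py above: @abstractmethod blocks instantiation until overridden, and ABCMeta.register() makes an unrelated class pass issubclass() without inheriting. The Worker/Miner/Legacy names are made up for the example.

# Illustrative only -- not shipped with the package.
from abc import ABCMeta, abstractmethod

class Worker:
    __metaclass__ = ABCMeta      # Python 2 spelling of the metaclass hook

    @abstractmethod
    def run(self):
        raise NotImplementedError

try:
    Worker()                     # abstract method not overridden
except TypeError, e:
    print e                      # Can't instantiate abstract class Worker ...

class Miner(Worker):
    def run(self):
        return "hashing"

print Miner().run()              # -> hashing

class Legacy(object):
    def run(self):
        return "legacy"

Worker.register(Legacy)          # virtual subclass, resolved by __subclasscheck__
print issubclass(Legacy, Worker) # -> True, though Legacy never inherits Worker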
522
icarus-miner/data/usr/lib/python2.6/aliases.py
Normal file
@ -0,0 +1,522 @@
""" Encoding Aliases Support

    This module is used by the encodings package search function to
    map encodings names to module names.

    Note that the search function normalizes the encoding names before
    doing the lookup, so the mapping will have to map normalized
    encoding names to module names.

    Contents:

        The following aliases dictionary contains mappings of all IANA
        character set names for which the Python core library provides
        codecs. In addition to these, a few Python specific codec
        aliases have also been added.

"""
aliases = {

    # Please keep this list sorted alphabetically by value !

    # ascii codec
    '646'                : 'ascii',
    'ansi_x3.4_1968'     : 'ascii',
    'ansi_x3_4_1968'     : 'ascii',   # some email headers use this non-standard name
    'ansi_x3.4_1986'     : 'ascii',
    'cp367'              : 'ascii',
    'csascii'            : 'ascii',
    'ibm367'             : 'ascii',
    'iso646_us'          : 'ascii',
    'iso_646.irv_1991'   : 'ascii',
    'iso_ir_6'           : 'ascii',
    'us'                 : 'ascii',
    'us_ascii'           : 'ascii',

    # base64_codec codec
    'base64'             : 'base64_codec',
    'base_64'            : 'base64_codec',

    # big5 codec
    'big5_tw'            : 'big5',
    'csbig5'             : 'big5',

    # big5hkscs codec
    'big5_hkscs'         : 'big5hkscs',
    'hkscs'              : 'big5hkscs',

    # bz2_codec codec
    'bz2'                : 'bz2_codec',

    # cp037 codec
    '037'                : 'cp037',
    'csibm037'           : 'cp037',
    'ebcdic_cp_ca'       : 'cp037',
    'ebcdic_cp_nl'       : 'cp037',
    'ebcdic_cp_us'       : 'cp037',
    'ebcdic_cp_wt'       : 'cp037',
    'ibm037'             : 'cp037',
    'ibm039'             : 'cp037',

    # cp1026 codec
    '1026'               : 'cp1026',
    'csibm1026'          : 'cp1026',
    'ibm1026'            : 'cp1026',

    # cp1140 codec
    '1140'               : 'cp1140',
    'ibm1140'            : 'cp1140',

    # cp1250 codec
    '1250'               : 'cp1250',
    'windows_1250'       : 'cp1250',

    # cp1251 codec
    '1251'               : 'cp1251',
    'windows_1251'       : 'cp1251',

    # cp1252 codec
    '1252'               : 'cp1252',
    'windows_1252'       : 'cp1252',

    # cp1253 codec
    '1253'               : 'cp1253',
    'windows_1253'       : 'cp1253',

    # cp1254 codec
    '1254'               : 'cp1254',
    'windows_1254'       : 'cp1254',

    # cp1255 codec
    '1255'               : 'cp1255',
    'windows_1255'       : 'cp1255',

    # cp1256 codec
    '1256'               : 'cp1256',
    'windows_1256'       : 'cp1256',

    # cp1257 codec
    '1257'               : 'cp1257',
    'windows_1257'       : 'cp1257',

    # cp1258 codec
    '1258'               : 'cp1258',
    'windows_1258'       : 'cp1258',

    # cp424 codec
    '424'                : 'cp424',
    'csibm424'           : 'cp424',
    'ebcdic_cp_he'       : 'cp424',
    'ibm424'             : 'cp424',

    # cp437 codec
    '437'                : 'cp437',
    'cspc8codepage437'   : 'cp437',
    'ibm437'             : 'cp437',

    # cp500 codec
    '500'                : 'cp500',
    'csibm500'           : 'cp500',
    'ebcdic_cp_be'       : 'cp500',
    'ebcdic_cp_ch'       : 'cp500',
    'ibm500'             : 'cp500',

    # cp775 codec
    '775'                : 'cp775',
    'cspc775baltic'      : 'cp775',
    'ibm775'             : 'cp775',

    # cp850 codec
    '850'                : 'cp850',
    'cspc850multilingual' : 'cp850',
    'ibm850'             : 'cp850',

    # cp852 codec
    '852'                : 'cp852',
    'cspcp852'           : 'cp852',
    'ibm852'             : 'cp852',

    # cp855 codec
    '855'                : 'cp855',
    'csibm855'           : 'cp855',
    'ibm855'             : 'cp855',

    # cp857 codec
    '857'                : 'cp857',
    'csibm857'           : 'cp857',
    'ibm857'             : 'cp857',

    # cp860 codec
    '860'                : 'cp860',
    'csibm860'           : 'cp860',
    'ibm860'             : 'cp860',

    # cp861 codec
    '861'                : 'cp861',
    'cp_is'              : 'cp861',
    'csibm861'           : 'cp861',
    'ibm861'             : 'cp861',

    # cp862 codec
    '862'                : 'cp862',
    'cspc862latinhebrew' : 'cp862',
    'ibm862'             : 'cp862',

    # cp863 codec
    '863'                : 'cp863',
    'csibm863'           : 'cp863',
    'ibm863'             : 'cp863',

    # cp864 codec
    '864'                : 'cp864',
    'csibm864'           : 'cp864',
    'ibm864'             : 'cp864',

    # cp865 codec
    '865'                : 'cp865',
    'csibm865'           : 'cp865',
    'ibm865'             : 'cp865',

    # cp866 codec
    '866'                : 'cp866',
    'csibm866'           : 'cp866',
    'ibm866'             : 'cp866',

    # cp869 codec
    '869'                : 'cp869',
    'cp_gr'              : 'cp869',
    'csibm869'           : 'cp869',
    'ibm869'             : 'cp869',

    # cp932 codec
    '932'                : 'cp932',
    'ms932'              : 'cp932',
    'mskanji'            : 'cp932',
    'ms_kanji'           : 'cp932',

    # cp949 codec
    '949'                : 'cp949',
    'ms949'              : 'cp949',
    'uhc'                : 'cp949',

    # cp950 codec
    '950'                : 'cp950',
    'ms950'              : 'cp950',

    # euc_jis_2004 codec
    'jisx0213'           : 'euc_jis_2004',
    'eucjis2004'         : 'euc_jis_2004',
    'euc_jis2004'        : 'euc_jis_2004',

    # euc_jisx0213 codec
    'eucjisx0213'        : 'euc_jisx0213',

    # euc_jp codec
    'eucjp'              : 'euc_jp',
    'ujis'               : 'euc_jp',
    'u_jis'              : 'euc_jp',

    # euc_kr codec
    'euckr'              : 'euc_kr',
    'korean'             : 'euc_kr',
    'ksc5601'            : 'euc_kr',
    'ks_c_5601'          : 'euc_kr',
    'ks_c_5601_1987'     : 'euc_kr',
    'ksx1001'            : 'euc_kr',
    'ks_x_1001'          : 'euc_kr',

    # gb18030 codec
    'gb18030_2000'       : 'gb18030',

    # gb2312 codec
    'chinese'            : 'gb2312',
    'csiso58gb231280'    : 'gb2312',
    'euc_cn'             : 'gb2312',
    'euccn'              : 'gb2312',
    'eucgb2312_cn'       : 'gb2312',
    'gb2312_1980'        : 'gb2312',
    'gb2312_80'          : 'gb2312',
    'iso_ir_58'          : 'gb2312',

    # gbk codec
    '936'                : 'gbk',
    'cp936'              : 'gbk',
    'ms936'              : 'gbk',

    # hex_codec codec
    'hex'                : 'hex_codec',

    # hp_roman8 codec
    'roman8'             : 'hp_roman8',
    'r8'                 : 'hp_roman8',
    'csHPRoman8'         : 'hp_roman8',

    # hz codec
    'hzgb'               : 'hz',
    'hz_gb'              : 'hz',
    'hz_gb_2312'         : 'hz',

    # iso2022_jp codec
    'csiso2022jp'        : 'iso2022_jp',
    'iso2022jp'          : 'iso2022_jp',
    'iso_2022_jp'        : 'iso2022_jp',

    # iso2022_jp_1 codec
    'iso2022jp_1'        : 'iso2022_jp_1',
    'iso_2022_jp_1'      : 'iso2022_jp_1',

    # iso2022_jp_2 codec
    'iso2022jp_2'        : 'iso2022_jp_2',
    'iso_2022_jp_2'      : 'iso2022_jp_2',

    # iso2022_jp_2004 codec
    'iso_2022_jp_2004'   : 'iso2022_jp_2004',
    'iso2022jp_2004'     : 'iso2022_jp_2004',

    # iso2022_jp_3 codec
    'iso2022jp_3'        : 'iso2022_jp_3',
    'iso_2022_jp_3'      : 'iso2022_jp_3',

    # iso2022_jp_ext codec
    'iso2022jp_ext'      : 'iso2022_jp_ext',
    'iso_2022_jp_ext'    : 'iso2022_jp_ext',

    # iso2022_kr codec
    'csiso2022kr'        : 'iso2022_kr',
    'iso2022kr'          : 'iso2022_kr',
    'iso_2022_kr'        : 'iso2022_kr',

    # iso8859_10 codec
    'csisolatin6'        : 'iso8859_10',
    'iso_8859_10'        : 'iso8859_10',
    'iso_8859_10_1992'   : 'iso8859_10',
    'iso_ir_157'         : 'iso8859_10',
    'l6'                 : 'iso8859_10',
    'latin6'             : 'iso8859_10',

    # iso8859_11 codec
    'thai'               : 'iso8859_11',
    'iso_8859_11'        : 'iso8859_11',
    'iso_8859_11_2001'   : 'iso8859_11',

    # iso8859_13 codec
    'iso_8859_13'        : 'iso8859_13',
    'l7'                 : 'iso8859_13',
    'latin7'             : 'iso8859_13',

    # iso8859_14 codec
    'iso_8859_14'        : 'iso8859_14',
    'iso_8859_14_1998'   : 'iso8859_14',
    'iso_celtic'         : 'iso8859_14',
    'iso_ir_199'         : 'iso8859_14',
    'l8'                 : 'iso8859_14',
    'latin8'             : 'iso8859_14',

    # iso8859_15 codec
    'iso_8859_15'        : 'iso8859_15',
    'l9'                 : 'iso8859_15',
    'latin9'             : 'iso8859_15',

    # iso8859_16 codec
    'iso_8859_16'        : 'iso8859_16',
    'iso_8859_16_2001'   : 'iso8859_16',
    'iso_ir_226'         : 'iso8859_16',
    'l10'                : 'iso8859_16',
    'latin10'            : 'iso8859_16',

    # iso8859_2 codec
    'csisolatin2'        : 'iso8859_2',
    'iso_8859_2'         : 'iso8859_2',
    'iso_8859_2_1987'    : 'iso8859_2',
    'iso_ir_101'         : 'iso8859_2',
    'l2'                 : 'iso8859_2',
    'latin2'             : 'iso8859_2',

    # iso8859_3 codec
    'csisolatin3'        : 'iso8859_3',
    'iso_8859_3'         : 'iso8859_3',
    'iso_8859_3_1988'    : 'iso8859_3',
    'iso_ir_109'         : 'iso8859_3',
    'l3'                 : 'iso8859_3',
    'latin3'             : 'iso8859_3',

    # iso8859_4 codec
    'csisolatin4'        : 'iso8859_4',
    'iso_8859_4'         : 'iso8859_4',
    'iso_8859_4_1988'    : 'iso8859_4',
    'iso_ir_110'         : 'iso8859_4',
    'l4'                 : 'iso8859_4',
    'latin4'             : 'iso8859_4',

    # iso8859_5 codec
    'csisolatincyrillic' : 'iso8859_5',
    'cyrillic'           : 'iso8859_5',
    'iso_8859_5'         : 'iso8859_5',
    'iso_8859_5_1988'    : 'iso8859_5',
    'iso_ir_144'         : 'iso8859_5',

    # iso8859_6 codec
    'arabic'             : 'iso8859_6',
    'asmo_708'           : 'iso8859_6',
    'csisolatinarabic'   : 'iso8859_6',
    'ecma_114'           : 'iso8859_6',
    'iso_8859_6'         : 'iso8859_6',
    'iso_8859_6_1987'    : 'iso8859_6',
    'iso_ir_127'         : 'iso8859_6',

    # iso8859_7 codec
    'csisolatingreek'    : 'iso8859_7',
    'ecma_118'           : 'iso8859_7',
    'elot_928'           : 'iso8859_7',
    'greek'              : 'iso8859_7',
    'greek8'             : 'iso8859_7',
    'iso_8859_7'         : 'iso8859_7',
    'iso_8859_7_1987'    : 'iso8859_7',
    'iso_ir_126'         : 'iso8859_7',

    # iso8859_8 codec
    'csisolatinhebrew'   : 'iso8859_8',
    'hebrew'             : 'iso8859_8',
    'iso_8859_8'         : 'iso8859_8',
    'iso_8859_8_1988'    : 'iso8859_8',
    'iso_ir_138'         : 'iso8859_8',

    # iso8859_9 codec
    'csisolatin5'        : 'iso8859_9',
    'iso_8859_9'         : 'iso8859_9',
    'iso_8859_9_1989'    : 'iso8859_9',
    'iso_ir_148'         : 'iso8859_9',
    'l5'                 : 'iso8859_9',
    'latin5'             : 'iso8859_9',

    # johab codec
    'cp1361'             : 'johab',
    'ms1361'             : 'johab',

    # koi8_r codec
    'cskoi8r'            : 'koi8_r',

    # latin_1 codec
    #
    # Note that the latin_1 codec is implemented internally in C and a
    # lot faster than the charmap codec iso8859_1 which uses the same
    # encoding. This is why we discourage the use of the iso8859_1
    # codec and alias it to latin_1 instead.
    #
    '8859'               : 'latin_1',
    'cp819'              : 'latin_1',
    'csisolatin1'        : 'latin_1',
    'ibm819'             : 'latin_1',
    'iso8859'            : 'latin_1',
    'iso8859_1'          : 'latin_1',
    'iso_8859_1'         : 'latin_1',
    'iso_8859_1_1987'    : 'latin_1',
    'iso_ir_100'         : 'latin_1',
    'l1'                 : 'latin_1',
    'latin'              : 'latin_1',
    'latin1'             : 'latin_1',

    # mac_cyrillic codec
    'maccyrillic'        : 'mac_cyrillic',

    # mac_greek codec
    'macgreek'           : 'mac_greek',

    # mac_iceland codec
    'maciceland'         : 'mac_iceland',

    # mac_latin2 codec
    'maccentraleurope'   : 'mac_latin2',
    'maclatin2'          : 'mac_latin2',

    # mac_roman codec
    'macroman'           : 'mac_roman',

    # mac_turkish codec
    'macturkish'         : 'mac_turkish',

    # mbcs codec
    'dbcs'               : 'mbcs',

    # ptcp154 codec
    'csptcp154'          : 'ptcp154',
    'pt154'              : 'ptcp154',
    'cp154'              : 'ptcp154',
    'cyrillic-asian'     : 'ptcp154',

    # quopri_codec codec
    'quopri'             : 'quopri_codec',
    'quoted_printable'   : 'quopri_codec',
    'quotedprintable'    : 'quopri_codec',

    # rot_13 codec
    'rot13'              : 'rot_13',

    # shift_jis codec
    'csshiftjis'         : 'shift_jis',
    'shiftjis'           : 'shift_jis',
    'sjis'               : 'shift_jis',
    's_jis'              : 'shift_jis',

    # shift_jis_2004 codec
    'shiftjis2004'       : 'shift_jis_2004',
    'sjis_2004'          : 'shift_jis_2004',
    's_jis_2004'         : 'shift_jis_2004',

    # shift_jisx0213 codec
    'shiftjisx0213'      : 'shift_jisx0213',
    'sjisx0213'          : 'shift_jisx0213',
    's_jisx0213'         : 'shift_jisx0213',

    # tactis codec
    'tis260'             : 'tactis',

    # tis_620 codec
    'tis620'             : 'tis_620',
    'tis_620_0'          : 'tis_620',
    'tis_620_2529_0'     : 'tis_620',
    'tis_620_2529_1'     : 'tis_620',
    'iso_ir_166'         : 'tis_620',

    # utf_16 codec
    'u16'                : 'utf_16',
    'utf16'              : 'utf_16',

    # utf_16_be codec
    'unicodebigunmarked' : 'utf_16_be',
    'utf_16be'           : 'utf_16_be',

    # utf_16_le codec
    'unicodelittleunmarked' : 'utf_16_le',
    'utf_16le'           : 'utf_16_le',

    # utf_32 codec
    'u32'                : 'utf_32',
    'utf32'              : 'utf_32',

    # utf_32_be codec
    'utf_32be'           : 'utf_32_be',

    # utf_32_le codec
    'utf_32le'           : 'utf_32_le',

    # utf_7 codec
    'u7'                 : 'utf_7',
    'utf7'               : 'utf_7',
    'unicode_1_1_utf_7'  : 'utf_7',

    # utf_8 codec
    'u8'                 : 'utf_8',
    'utf'                : 'utf_8',
    'utf8'               : 'utf_8',
    'utf8_ucs2'          : 'utf_8',
    'utf8_ucs4'          : 'utf_8',

    # uu_codec codec
    'uu'                 : 'uu_codec',

    # zlib_codec codec
    'zip'                : 'zlib_codec',
    'zlib'               : 'zlib_codec',

}
BIN
icarus-miner/data/usr/lib/python2.6/aliases.pyc
Normal file
Binary file not shown.
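
For reference, a small sketch (illustrative only) of how this table is consumed: codecs.lookup() normalizes the requested name and then resolves aliases like these to a codec module. Note the module is installed flat under /usr/lib/python2.6 in this package, whereas stock Python exposes it as encodings.aliases.

# Illustrative only -- not shipped with the package.
import codecs
from aliases import aliases      # stock Python: from encodings.aliases import aliases

print aliases['u8']              # -> 'utf_8'
for name in ('u8', 'UTF-8', 'utf8'):
    print name, '->', codecs.lookup(name).name   # each resolves to 'utf-8'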
50
icarus-miner/data/usr/lib/python2.6/ascii.py
Normal file
@ -0,0 +1,50 @@
""" Python 'ascii' Codec


Written by Marc-Andre Lemburg (mal@lemburg.com).

(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.

"""
import codecs

### Codec APIs

class Codec(codecs.Codec):

    # Note: Binding these as C functions will result in the class not
    # converting them to methods. This is intended.
    encode = codecs.ascii_encode
    decode = codecs.ascii_decode

class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        return codecs.ascii_encode(input, self.errors)[0]

class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        return codecs.ascii_decode(input, self.errors)[0]

class StreamWriter(Codec,codecs.StreamWriter):
    pass

class StreamReader(Codec,codecs.StreamReader):
    pass

class StreamConverter(StreamWriter,StreamReader):

    encode = codecs.ascii_decode
    decode = codecs.ascii_encode

### encodings module API

def getregentry():
    return codecs.CodecInfo(
        name='ascii',
        encode=Codec.encode,
        decode=Codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamwriter=StreamWriter,
        streamreader=StreamReader,
    )
BIN
icarus-miner/data/usr/lib/python2.6/ascii.pyc
Normal file
Binary file not shown.
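
For reference, a small sketch (illustrative only) of the module API above: the encodings search function calls getregentry() and registers the returned CodecInfo, whose encode/decode pairs are the raw C codec functions.

# Illustrative only -- not shipped with the package.
import ascii                     # the flat-layout module above; stock Python: encodings.ascii

info = ascii.getregentry()
print info.name                  # -> ascii
print info.encode(u'icarus')     # -> ('icarus', 6)
print info.decode('icarus')      # -> (u'icarus', 6)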
359
icarus-miner/data/usr/lib/python2.6/base64.py
Executable file
@ -0,0 +1,359 @@
#! /usr/bin/env python

"""RFC 3548: Base16, Base32, Base64 Data Encodings"""

# Modified 04-Oct-1995 by Jack Jansen to use binascii module
# Modified 30-Dec-2003 by Barry Warsaw to add full RFC 3548 support

import re
import struct
import binascii


__all__ = [
    # Legacy interface exports traditional RFC 1521 Base64 encodings
    'encode', 'decode', 'encodestring', 'decodestring',
    # Generalized interface for other encodings
    'b64encode', 'b64decode', 'b32encode', 'b32decode',
    'b16encode', 'b16decode',
    # Standard Base64 encoding
    'standard_b64encode', 'standard_b64decode',
    # Some common Base64 alternatives.  As referenced by RFC 3458, see thread
    # starting at:
    #
    # http://zgp.org/pipermail/p2p-hackers/2001-September/000316.html
    'urlsafe_b64encode', 'urlsafe_b64decode',
    ]

_translation = [chr(_x) for _x in range(256)]
EMPTYSTRING = ''


def _translate(s, altchars):
    translation = _translation[:]
    for k, v in altchars.items():
        translation[ord(k)] = v
    return s.translate(''.join(translation))



# Base64 encoding/decoding uses binascii

def b64encode(s, altchars=None):
    """Encode a string using Base64.

    s is the string to encode.  Optional altchars must be a string of at least
    length 2 (additional characters are ignored) which specifies an
    alternative alphabet for the '+' and '/' characters.  This allows an
    application to e.g. generate url or filesystem safe Base64 strings.

    The encoded string is returned.
    """
    # Strip off the trailing newline
    encoded = binascii.b2a_base64(s)[:-1]
    if altchars is not None:
        return _translate(encoded, {'+': altchars[0], '/': altchars[1]})
    return encoded


def b64decode(s, altchars=None):
    """Decode a Base64 encoded string.

    s is the string to decode.  Optional altchars must be a string of at least
    length 2 (additional characters are ignored) which specifies the
    alternative alphabet used instead of the '+' and '/' characters.

    The decoded string is returned.  A TypeError is raised if s is
    incorrectly padded or if there are non-alphabet characters present in the
    string.
    """
    if altchars is not None:
        s = _translate(s, {altchars[0]: '+', altchars[1]: '/'})
    try:
        return binascii.a2b_base64(s)
    except binascii.Error, msg:
        # Transform this exception for consistency
        raise TypeError(msg)


def standard_b64encode(s):
    """Encode a string using the standard Base64 alphabet.

    s is the string to encode.  The encoded string is returned.
    """
    return b64encode(s)

def standard_b64decode(s):
    """Decode a string encoded with the standard Base64 alphabet.

    s is the string to decode.  The decoded string is returned.  A TypeError
    is raised if the string is incorrectly padded or if there are non-alphabet
    characters present in the string.
    """
    return b64decode(s)

def urlsafe_b64encode(s):
    """Encode a string using a url-safe Base64 alphabet.

    s is the string to encode.  The encoded string is returned.  The alphabet
    uses '-' instead of '+' and '_' instead of '/'.
    """
    return b64encode(s, '-_')

def urlsafe_b64decode(s):
    """Decode a string encoded with a url-safe Base64 alphabet.

    s is the string to decode.  The decoded string is returned.  A TypeError
    is raised if the string is incorrectly padded or if there are non-alphabet
    characters present in the string.

    The alphabet uses '-' instead of '+' and '_' instead of '/'.
    """
    return b64decode(s, '-_')



# Base32 encoding/decoding must be done in Python
_b32alphabet = {
    0: 'A',  9: 'J', 18: 'S', 27: '3',
    1: 'B', 10: 'K', 19: 'T', 28: '4',
    2: 'C', 11: 'L', 20: 'U', 29: '5',
    3: 'D', 12: 'M', 21: 'V', 30: '6',
    4: 'E', 13: 'N', 22: 'W', 31: '7',
    5: 'F', 14: 'O', 23: 'X',
    6: 'G', 15: 'P', 24: 'Y',
    7: 'H', 16: 'Q', 25: 'Z',
    8: 'I', 17: 'R', 26: '2',
    }

_b32tab = _b32alphabet.items()
_b32tab.sort()
_b32tab = [v for k, v in _b32tab]
_b32rev = dict([(v, long(k)) for k, v in _b32alphabet.items()])


def b32encode(s):
    """Encode a string using Base32.

    s is the string to encode.  The encoded string is returned.
    """
    parts = []
    quanta, leftover = divmod(len(s), 5)
    # Pad the last quantum with zero bits if necessary
    if leftover:
        s += ('\0' * (5 - leftover))
        quanta += 1
    for i in range(quanta):
        # c1 and c2 are 16 bits wide, c3 is 8 bits wide.  The intent of this
        # code is to process the 40 bits in units of 5 bits.  So we take the 1
        # leftover bit of c1 and tack it onto c2.  Then we take the 2 leftover
        # bits of c2 and tack them onto c3.  The shifts and masks are intended
        # to give us values of exactly 5 bits in width.
        c1, c2, c3 = struct.unpack('!HHB', s[i*5:(i+1)*5])
        c2 += (c1 & 1) << 16 # 17 bits wide
        c3 += (c2 & 3) << 8  # 10 bits wide
        parts.extend([_b32tab[c1 >> 11],         # bits 1 - 5
                      _b32tab[(c1 >> 6) & 0x1f], # bits 6 - 10
                      _b32tab[(c1 >> 1) & 0x1f], # bits 11 - 15
                      _b32tab[c2 >> 12],         # bits 16 - 20 (1 - 5)
                      _b32tab[(c2 >> 7) & 0x1f], # bits 21 - 25 (6 - 10)
                      _b32tab[(c2 >> 2) & 0x1f], # bits 26 - 30 (11 - 15)
                      _b32tab[c3 >> 5],          # bits 31 - 35 (1 - 5)
                      _b32tab[c3 & 0x1f],        # bits 36 - 40 (1 - 5)
                      ])
    encoded = EMPTYSTRING.join(parts)
    # Adjust for any leftover partial quanta
    if leftover == 1:
        return encoded[:-6] + '======'
    elif leftover == 2:
        return encoded[:-4] + '===='
    elif leftover == 3:
        return encoded[:-3] + '==='
    elif leftover == 4:
        return encoded[:-1] + '='
    return encoded


def b32decode(s, casefold=False, map01=None):
    """Decode a Base32 encoded string.

    s is the string to decode.  Optional casefold is a flag specifying whether
    a lowercase alphabet is acceptable as input.  For security purposes, the
    default is False.

    RFC 3548 allows for optional mapping of the digit 0 (zero) to the letter O
    (oh), and for optional mapping of the digit 1 (one) to either the letter I
    (eye) or letter L (el).  The optional argument map01 when not None,
    specifies which letter the digit 1 should be mapped to (when map01 is not
    None, the digit 0 is always mapped to the letter O).  For security
    purposes the default is None, so that 0 and 1 are not allowed in the
    input.

    The decoded string is returned.  A TypeError is raised if s is
    incorrectly padded or if there are non-alphabet characters present in the
    string.
    """
    quanta, leftover = divmod(len(s), 8)
    if leftover:
        raise TypeError('Incorrect padding')
    # Handle section 2.4 zero and one mapping.  The flag map01 will be either
    # False, or the character to map the digit 1 (one) to.  It should be
    # either L (el) or I (eye).
    if map01:
        s = _translate(s, {'0': 'O', '1': map01})
    if casefold:
        s = s.upper()
    # Strip off pad characters from the right.  We need to count the pad
    # characters because this will tell us how many null bytes to remove from
    # the end of the decoded string.
    padchars = 0
    mo = re.search('(?P<pad>[=]*)$', s)
    if mo:
        padchars = len(mo.group('pad'))
        if padchars > 0:
            s = s[:-padchars]
    # Now decode the full quanta
    parts = []
    acc = 0
    shift = 35
    for c in s:
        val = _b32rev.get(c)
        if val is None:
            raise TypeError('Non-base32 digit found')
        acc += _b32rev[c] << shift
        shift -= 5
        if shift < 0:
            parts.append(binascii.unhexlify('%010x' % acc))
            acc = 0
            shift = 35
    # Process the last, partial quanta
    last = binascii.unhexlify('%010x' % acc)
    if padchars == 0:
        last = ''                       # No characters
    elif padchars == 1:
        last = last[:-1]
    elif padchars == 3:
        last = last[:-2]
    elif padchars == 4:
        last = last[:-3]
    elif padchars == 6:
        last = last[:-4]
    else:
        raise TypeError('Incorrect padding')
    parts.append(last)
    return EMPTYSTRING.join(parts)



# RFC 3548, Base 16 Alphabet specifies uppercase, but hexlify() returns
# lowercase.  The RFC also recommends against accepting input case
# insensitively.
def b16encode(s):
    """Encode a string using Base16.

    s is the string to encode.  The encoded string is returned.
    """
    return binascii.hexlify(s).upper()


def b16decode(s, casefold=False):
    """Decode a Base16 encoded string.

    s is the string to decode.  Optional casefold is a flag specifying whether
    a lowercase alphabet is acceptable as input.  For security purposes, the
    default is False.

    The decoded string is returned.  A TypeError is raised if s is
    incorrectly padded or if there are non-alphabet characters present in the
    string.
    """
    if casefold:
        s = s.upper()
    if re.search('[^0-9A-F]', s):
        raise TypeError('Non-base16 digit found')
    return binascii.unhexlify(s)



# Legacy interface.  This code could be cleaned up since I don't believe
# binascii has any line length limitations.  It just doesn't seem worth it
# though.

MAXLINESIZE = 76 # Excluding the CRLF
MAXBINSIZE = (MAXLINESIZE//4)*3

def encode(input, output):
    """Encode a file."""
    while True:
        s = input.read(MAXBINSIZE)
        if not s:
            break
        while len(s) < MAXBINSIZE:
            ns = input.read(MAXBINSIZE-len(s))
            if not ns:
                break
            s += ns
        line = binascii.b2a_base64(s)
        output.write(line)


def decode(input, output):
    """Decode a file."""
    while True:
        line = input.readline()
        if not line:
            break
        s = binascii.a2b_base64(line)
        output.write(s)


def encodestring(s):
    """Encode a string into multiple lines of base-64 data."""
    pieces = []
    for i in range(0, len(s), MAXBINSIZE):
        chunk = s[i : i + MAXBINSIZE]
        pieces.append(binascii.b2a_base64(chunk))
    return "".join(pieces)


def decodestring(s):
    """Decode a string."""
    return binascii.a2b_base64(s)



# Usable as a script...
def test():
    """Small test program"""
    import sys, getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'deut')
    except getopt.error, msg:
        sys.stdout = sys.stderr
        print msg
        print """usage: %s [-d|-e|-u|-t] [file|-]
        -d, -u: decode
        -e: encode (default)
        -t: encode and decode string 'Aladdin:open sesame'"""%sys.argv[0]
        sys.exit(2)
    func = encode
    for o, a in opts:
        if o == '-e': func = encode
        if o == '-d': func = decode
        if o == '-u': func = decode
        if o == '-t': test1(); return
    if args and args[0] != '-':
        func(open(args[0], 'rb'), sys.stdout)
    else:
        func(sys.stdin, sys.stdout)


def test1():
    s0 = "Aladdin:open sesame"
    s1 = encodestring(s0)
    s2 = decodestring(s1)
    print s0, repr(s1), s2


if __name__ == '__main__':
    test()
BIN
icarus-miner/data/usr/lib/python2.6/base64.pyc
Normal file
Binary file not shown.
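
For reference, a short sketch (illustrative only) of the interfaces above; base64 is presumably bundled because HTTP Basic auth in the miner's JSON-RPC client sends a base64-encoded 'user:password' header.

# Illustrative only -- not shipped with the package.
import base64

print base64.b64encode('user:password')            # -> dXNlcjpwYXNzd29yZA==
print base64.b64decode('dXNlcjpwYXNzd29yZA==')     # -> user:password
print base64.urlsafe_b64encode('\xfb\xff\xbf')     # -> -_-_ (swaps '+' and '/')
b32 = base64.b32encode('getwork')
print base64.b32decode(b32.lower(), casefold=True) # -> getwork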
92
icarus-miner/data/usr/lib/python2.6/bisect.py
Normal file
@ -0,0 +1,92 @@
"""Bisection algorithms."""

def insort_right(a, x, lo=0, hi=None):
    """Insert item x in list a, and keep it sorted assuming a is sorted.

    If x is already in a, insert it to the right of the rightmost x.

    Optional args lo (default 0) and hi (default len(a)) bound the
    slice of a to be searched.
    """

    if lo < 0:
        raise ValueError('lo must be non-negative')
    if hi is None:
        hi = len(a)
    while lo < hi:
        mid = (lo+hi)//2
        if x < a[mid]: hi = mid
        else: lo = mid+1
    a.insert(lo, x)

insort = insort_right   # backward compatibility

def bisect_right(a, x, lo=0, hi=None):
    """Return the index where to insert item x in list a, assuming a is sorted.

    The return value i is such that all e in a[:i] have e <= x, and all e in
    a[i:] have e > x.  So if x already appears in the list, a.insert(x) will
    insert just after the rightmost x already there.

    Optional args lo (default 0) and hi (default len(a)) bound the
    slice of a to be searched.
    """

    if lo < 0:
        raise ValueError('lo must be non-negative')
    if hi is None:
        hi = len(a)
    while lo < hi:
        mid = (lo+hi)//2
        if x < a[mid]: hi = mid
        else: lo = mid+1
    return lo

bisect = bisect_right   # backward compatibility

def insort_left(a, x, lo=0, hi=None):
    """Insert item x in list a, and keep it sorted assuming a is sorted.

    If x is already in a, insert it to the left of the leftmost x.

    Optional args lo (default 0) and hi (default len(a)) bound the
    slice of a to be searched.
    """

    if lo < 0:
        raise ValueError('lo must be non-negative')
    if hi is None:
        hi = len(a)
    while lo < hi:
        mid = (lo+hi)//2
        if a[mid] < x: lo = mid+1
        else: hi = mid
    a.insert(lo, x)


def bisect_left(a, x, lo=0, hi=None):
    """Return the index where to insert item x in list a, assuming a is sorted.

    The return value i is such that all e in a[:i] have e < x, and all e in
    a[i:] have e >= x.  So if x already appears in the list, a.insert(x) will
    insert just before the leftmost x already there.

    Optional args lo (default 0) and hi (default len(a)) bound the
    slice of a to be searched.
    """

    if lo < 0:
        raise ValueError('lo must be non-negative')
    if hi is None:
        hi = len(a)
    while lo < hi:
        mid = (lo+hi)//2
        if a[mid] < x: lo = mid+1
        else: hi = mid
    return lo

# Overwrite above definitions with a fast C implementation
try:
    from _bisect import bisect_right, bisect_left, insort_left, insort_right, insort, bisect
except ImportError:
    pass
BIN
icarus-miner/data/usr/lib/python2.6/bisect.pyc
Normal file
Binary file not shown.
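
For reference, a short sketch (illustrative only) of the two entry points above: insort keeps a list sorted on insert, and the _left/_right variants differ only around equal keys. The sample numbers are made up.

# Illustrative only -- not shipped with the package.
import bisect

rates = [220.0, 380.5, 401.2, 415.0]        # sorted Mhash/s samples (made-up numbers)
bisect.insort(rates, 399.9)                 # insert while preserving order
print rates                                 # [220.0, 380.5, 399.9, 401.2, 415.0]
print bisect.bisect_left(rates, 401.2)      # -> 3 (insertion point before the equal item)
print bisect.bisect_right(rates, 401.2)     # -> 4 (insertion point after it)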
1082
icarus-miner/data/usr/lib/python2.6/codecs.py
Normal file
File diff suppressed because it is too large
BIN
icarus-miner/data/usr/lib/python2.6/codecs.pyc
Normal file
Binary file not shown.
149
icarus-miner/data/usr/lib/python2.6/collections.py
Normal file
@ -0,0 +1,149 @@
__all__ = ['deque', 'defaultdict', 'namedtuple']
# For bootstrapping reasons, the collection ABCs are defined in _abcoll.py.
# They should however be considered an integral part of collections.py.
from _abcoll import *
import _abcoll
__all__ += _abcoll.__all__

from _collections import deque, defaultdict
from operator import itemgetter as _itemgetter
from keyword import iskeyword as _iskeyword
import sys as _sys

def namedtuple(typename, field_names, verbose=False):
    """Returns a new subclass of tuple with named fields.

    >>> Point = namedtuple('Point', 'x y')
    >>> Point.__doc__                   # docstring for the new class
    'Point(x, y)'
    >>> p = Point(11, y=22)             # instantiate with positional args or keywords
    >>> p[0] + p[1]                     # indexable like a plain tuple
    33
    >>> x, y = p                        # unpack like a regular tuple
    >>> x, y
    (11, 22)
    >>> p.x + p.y                       # fields also accessible by name
    33
    >>> d = p._asdict()                 # convert to a dictionary
    >>> d['x']
    11
    >>> Point(**d)                      # convert from a dictionary
    Point(x=11, y=22)
    >>> p._replace(x=100)               # _replace() is like str.replace() but targets named fields
    Point(x=100, y=22)

    """

    # Parse and validate the field names.  Validation serves two purposes,
    # generating informative error messages and preventing template injection attacks.
    if isinstance(field_names, basestring):
        field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas
    field_names = tuple(map(str, field_names))
    for name in (typename,) + field_names:
        if not all(c.isalnum() or c=='_' for c in name):
            raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name)
        if _iskeyword(name):
            raise ValueError('Type names and field names cannot be a keyword: %r' % name)
        if name[0].isdigit():
            raise ValueError('Type names and field names cannot start with a number: %r' % name)
    seen_names = set()
    for name in field_names:
        if name.startswith('_'):
            raise ValueError('Field names cannot start with an underscore: %r' % name)
        if name in seen_names:
            raise ValueError('Encountered duplicate field name: %r' % name)
        seen_names.add(name)

    # Create and fill-in the class template
    numfields = len(field_names)
    argtxt = repr(field_names).replace("'", "")[1:-1]   # tuple repr without parens or quotes
    reprtxt = ', '.join('%s=%%r' % name for name in field_names)
    dicttxt = ', '.join('%r: t[%d]' % (name, pos) for pos, name in enumerate(field_names))
    template = '''class %(typename)s(tuple):
        '%(typename)s(%(argtxt)s)' \n
        __slots__ = () \n
        _fields = %(field_names)r \n
        def __new__(_cls, %(argtxt)s):
            return _tuple.__new__(_cls, (%(argtxt)s)) \n
        @classmethod
        def _make(cls, iterable, new=tuple.__new__, len=len):
            'Make a new %(typename)s object from a sequence or iterable'
            result = new(cls, iterable)
            if len(result) != %(numfields)d:
                raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))
            return result \n
        def __repr__(self):
            return '%(typename)s(%(reprtxt)s)' %% self \n
        def _asdict(t):
            'Return a new dict which maps field names to their values'
            return {%(dicttxt)s} \n
        def _replace(_self, **kwds):
            'Return a new %(typename)s object replacing specified fields with new values'
            result = _self._make(map(kwds.pop, %(field_names)r, _self))
            if kwds:
                raise ValueError('Got unexpected field names: %%r' %% kwds.keys())
            return result \n
        def __getnewargs__(self):
            return tuple(self) \n\n''' % locals()
    for i, name in enumerate(field_names):
        template += '        %s = _property(_itemgetter(%d))\n' % (name, i)
    if verbose:
        print template

    # Execute the template string in a temporary namespace and
    # support tracing utilities by setting a value for frame.f_globals['__name__']
    namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
                     _property=property, _tuple=tuple)
    try:
        exec template in namespace
    except SyntaxError, e:
        raise SyntaxError(e.message + ':\n' + template)
    result = namespace[typename]

    # For pickling to work, the __module__ variable needs to be set to the frame
    # where the named tuple is created.  Bypass this step in environments where
    # sys._getframe is not defined (Jython for example).
    if hasattr(_sys, '_getframe'):
        result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')

    return result


if __name__ == '__main__':
    # verify that instances can be pickled
    from cPickle import loads, dumps
    Point = namedtuple('Point', 'x, y', True)
    p = Point(x=10, y=20)
    assert p == loads(dumps(p))

    # test and demonstrate ability to override methods
    class Point(namedtuple('Point', 'x y')):
        __slots__ = ()
        @property
        def hypot(self):
            return (self.x ** 2 + self.y ** 2) ** 0.5
        def __str__(self):
            return 'Point: x=%6.3f  y=%6.3f  hypot=%6.3f' % (self.x, self.y, self.hypot)

    for p in Point(3, 4), Point(14, 5/7.):
        print p

    class Point(namedtuple('Point', 'x y')):
        'Point class with optimized _make() and _replace() without error-checking'
        __slots__ = ()
        _make = classmethod(tuple.__new__)
        def _replace(self, _map=map, **kwds):
            return self._make(_map(kwds.get, ('x', 'y'), self))

    print Point(11, 22)._replace(x=100)

    Point3D = namedtuple('Point3D', Point._fields + ('z',))
    print Point3D.__doc__

    import doctest
    TestResults = namedtuple('TestResults', 'failed attempted')
    print TestResults(*doctest.testmod())
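
namedtuple() above works by filling in a source-code template and exec'ing it in a scratch namespace; a stripped-down editorial sketch of that same technique (make_pair is a hypothetical illustrative name, not a stdlib API):

# Editorial sketch of the exec-a-template technique used by namedtuple().
# 'make_pair' is a hypothetical, simplified stand-in.
def make_pair(typename):
    template = (
        "class %(typename)s(tuple):\n"
        "    def __new__(cls, a, b):\n"
        "        return tuple.__new__(cls, (a, b))\n"
    ) % locals()
    namespace = {}
    exec template in namespace      # Python 2 exec-statement form
    return namespace[typename]

Pair = make_pair('Pair')
assert Pair(1, 2) == (1, 2)         # still a plain tuple underneath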
BIN
icarus-miner/data/usr/lib/python2.6/collections.pyc
Normal file
Binary file not shown.
201
icarus-miner/data/usr/lib/python2.6/copy_reg.py
Normal file
@ -0,0 +1,201 @@
"""Helper to provide extensibility for pickle/cPickle.

This is only useful to add pickle support for extension types defined in
C, not for instances of user-defined classes.
"""

from types import ClassType as _ClassType

__all__ = ["pickle", "constructor",
           "add_extension", "remove_extension", "clear_extension_cache"]

dispatch_table = {}

def pickle(ob_type, pickle_function, constructor_ob=None):
    if type(ob_type) is _ClassType:
        raise TypeError("copy_reg is not intended for use with classes")

    if not hasattr(pickle_function, '__call__'):
        raise TypeError("reduction functions must be callable")
    dispatch_table[ob_type] = pickle_function

    # The constructor_ob argument is a vestige of the old safe-for-unpickling
    # mechanism.  There is no reason for the caller to pass it anymore.
    if constructor_ob is not None:
        constructor(constructor_ob)

def constructor(object):
    if not hasattr(object, '__call__'):
        raise TypeError("constructors must be callable")

# Example: provide pickling support for complex numbers.

try:
    complex
except NameError:
    pass
else:

    def pickle_complex(c):
        return complex, (c.real, c.imag)

    pickle(complex, pickle_complex, complex)

# Support for pickling new-style objects

def _reconstructor(cls, base, state):
    if base is object:
        obj = object.__new__(cls)
    else:
        obj = base.__new__(cls, state)
    if base.__init__ != object.__init__:
        base.__init__(obj, state)
    return obj

_HEAPTYPE = 1<<9

# Python code for object.__reduce_ex__ for protocols 0 and 1

def _reduce_ex(self, proto):
    assert proto < 2
    for base in self.__class__.__mro__:
        if hasattr(base, '__flags__') and not base.__flags__ & _HEAPTYPE:
            break
    else:
        base = object # not really reachable
    if base is object:
        state = None
    else:
        if base is self.__class__:
            raise TypeError, "can't pickle %s objects" % base.__name__
        state = base(self)
    args = (self.__class__, base, state)
    try:
        getstate = self.__getstate__
    except AttributeError:
        if getattr(self, "__slots__", None):
            raise TypeError("a class that defines __slots__ without "
                            "defining __getstate__ cannot be pickled")
        try:
            dict = self.__dict__
        except AttributeError:
            dict = None
    else:
        dict = getstate()
    if dict:
        return _reconstructor, args, dict
    else:
        return _reconstructor, args

# Helper for __reduce_ex__ protocol 2

def __newobj__(cls, *args):
    return cls.__new__(cls, *args)

def _slotnames(cls):
    """Return a list of slot names for a given class.

    This needs to find slots defined by the class and its bases, so we
    can't simply return the __slots__ attribute.  We must walk down
    the Method Resolution Order and concatenate the __slots__ of each
    class found there.  (This assumes classes don't modify their
    __slots__ attribute to misrepresent their slots after the class is
    defined.)
    """

    # Get the value from a cache in the class if possible
    names = cls.__dict__.get("__slotnames__")
    if names is not None:
        return names

    # Not cached -- calculate the value
    names = []
    if not hasattr(cls, "__slots__"):
        # This class has no slots
        pass
    else:
        # Slots found -- gather slot names from all base classes
        for c in cls.__mro__:
            if "__slots__" in c.__dict__:
                slots = c.__dict__['__slots__']
                # if class has a single slot, it can be given as a string
                if isinstance(slots, basestring):
                    slots = (slots,)
                for name in slots:
                    # special descriptors
                    if name in ("__dict__", "__weakref__"):
                        continue
                    # mangled names
                    elif name.startswith('__') and not name.endswith('__'):
                        names.append('_%s%s' % (c.__name__, name))
                    else:
                        names.append(name)

    # Cache the outcome in the class if at all possible
    try:
        cls.__slotnames__ = names
    except:
        pass # But don't die if we can't

    return names

# A registry of extension codes.  This is an ad-hoc compression
# mechanism.  Whenever a global reference to <module>, <name> is about
# to be pickled, the (<module>, <name>) tuple is looked up here to see
# if it is a registered extension code for it.  Extension codes are
# universal, so that the meaning of a pickle does not depend on
# context.  (There are also some codes reserved for local use that
# don't have this restriction.)  Codes are positive ints; 0 is
# reserved.

_extension_registry = {}                # key -> code
_inverted_registry = {}                 # code -> key
_extension_cache = {}                   # code -> object
# Don't ever rebind those names:  cPickle grabs a reference to them when
# it's initialized, and won't see a rebinding.

def add_extension(module, name, code):
    """Register an extension code."""
    code = int(code)
    if not 1 <= code <= 0x7fffffff:
        raise ValueError, "code out of range"
    key = (module, name)
    if (_extension_registry.get(key) == code and
        _inverted_registry.get(code) == key):
        return # Redundant registrations are benign
    if key in _extension_registry:
        raise ValueError("key %s is already registered with code %s" %
                         (key, _extension_registry[key]))
    if code in _inverted_registry:
        raise ValueError("code %s is already in use for key %s" %
                         (code, _inverted_registry[code]))
    _extension_registry[key] = code
    _inverted_registry[code] = key

def remove_extension(module, name, code):
    """Unregister an extension code.  For testing only."""
    key = (module, name)
    if (_extension_registry.get(key) != code or
        _inverted_registry.get(code) != key):
        raise ValueError("key %s is not registered with code %s" %
                         (key, code))
    del _extension_registry[key]
    del _inverted_registry[code]
    if code in _extension_cache:
        del _extension_cache[code]

def clear_extension_cache():
    _extension_cache.clear()

# Standard extension code assignments

# Reserved ranges

# First  Last Count  Purpose
#     1   127   127  Reserved for Python standard library
#   128   191    64  Reserved for Zope
#   192   239    48  Reserved for 3rd parties
#   240   255    16  Reserved for private use (will never be assigned)
#   256   Inf   Inf  Reserved for future assignment

# Extension codes are assigned by the Python Software Foundation.
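
copy_reg.pickle() above is the public hook for registering a reduction function against a type; a minimal sketch of using it (Point and reduce_point are my own illustrative names, and real use targets C extension types rather than a plain class like this):

# Editorial usage sketch -- not part of the packaged copy_reg.py.
import copy_reg, pickle

class Point(object):
    def __init__(self, x, y):
        self.x, self.y = x, y

def reduce_point(p):
    # Tell pickle how to rebuild a Point: callable plus argument tuple.
    return Point, (p.x, p.y)

copy_reg.pickle(Point, reduce_point)

p2 = pickle.loads(pickle.dumps(Point(1, 2)))
assert (p2.x, p2.y) == (1, 2)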
BIN
icarus-miner/data/usr/lib/python2.6/copy_reg.pyc
Normal file
Binary file not shown.
157
icarus-miner/data/usr/lib/python2.6/encodings/__init__.py
Normal file
@ -0,0 +1,157 @@
""" Standard "encodings" Package

    Standard Python encoding modules are stored in this package
    directory.

    Codec modules must have names corresponding to normalized encoding
    names as defined in the normalize_encoding() function below, e.g.
    'utf-8' must be implemented by the module 'utf_8.py'.

    Each codec module must export the following interface:

    * getregentry() -> codecs.CodecInfo object
    The getregentry() API must return a CodecInfo object with encoder, decoder,
    incrementalencoder, incrementaldecoder, streamwriter and streamreader
    attributes which adhere to the Python Codec Interface Standard.

    In addition, a module may optionally also define the following
    APIs which are then used by the package's codec search function:

    * getaliases() -> sequence of encoding name strings to use as aliases

    Alias names returned by getaliases() must be normalized encoding
    names as defined by normalize_encoding().

Written by Marc-Andre Lemburg (mal@lemburg.com).

(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.

"""#"

import codecs
from encodings import aliases
import __builtin__

_cache = {}
_unknown = '--unknown--'
_import_tail = ['*']
_norm_encoding_map = ('                                              . '
                      '0123456789       ABCDEFGHIJKLMNOPQRSTUVWXYZ     '
                      ' abcdefghijklmnopqrstuvwxyz                     '
                      '                                                '
                      '                                                '
                      '                ')
_aliases = aliases.aliases

class CodecRegistryError(LookupError, SystemError):
    pass

def normalize_encoding(encoding):

    """ Normalize an encoding name.

        Normalization works as follows: all non-alphanumeric
        characters except the dot used for Python package names are
        collapsed and replaced with a single underscore, e.g. '  -;#'
        becomes '_'. Leading and trailing underscores are removed.

        Note that encoding names should be ASCII only; if they do use
        non-ASCII characters, these must be Latin-1 compatible.

    """
    # Make sure we have an 8-bit string, because .translate() works
    # differently for Unicode strings.
    if hasattr(__builtin__, "unicode") and isinstance(encoding, unicode):
        # Note that .encode('latin-1') does *not* use the codec
        # registry, so this call doesn't recurse. (See unicodeobject.c
        # PyUnicode_AsEncodedString() for details)
        encoding = encoding.encode('latin-1')
    return '_'.join(encoding.translate(_norm_encoding_map).split())

def search_function(encoding):

    # Cache lookup
    entry = _cache.get(encoding, _unknown)
    if entry is not _unknown:
        return entry

    # Import the module:
    #
    # First try to find an alias for the normalized encoding
    # name and lookup the module using the aliased name, then try to
    # lookup the module using the standard import scheme, i.e. first
    # try in the encodings package, then at top-level.
    #
    norm_encoding = normalize_encoding(encoding)
    aliased_encoding = _aliases.get(norm_encoding) or \
                       _aliases.get(norm_encoding.replace('.', '_'))
    if aliased_encoding is not None:
        modnames = [aliased_encoding,
                    norm_encoding]
    else:
        modnames = [norm_encoding]
    for modname in modnames:
        if not modname or '.' in modname:
            continue
        try:
            # Import is absolute to prevent the possibly malicious import of a
            # module with side-effects that is not in the 'encodings' package.
            mod = __import__('encodings.' + modname, fromlist=_import_tail,
                             level=0)
        except ImportError:
            pass
        else:
            break
    else:
        mod = None

    try:
        getregentry = mod.getregentry
    except AttributeError:
        # Not a codec module
        mod = None

    if mod is None:
        # Cache misses
        _cache[encoding] = None
        return None

    # Now ask the module for the registry entry
    entry = getregentry()
    if not isinstance(entry, codecs.CodecInfo):
        if not 4 <= len(entry) <= 7:
            raise CodecRegistryError,\
                 'module "%s" (%s) failed to register' % \
                    (mod.__name__, mod.__file__)
        if not callable(entry[0]) or \
           not callable(entry[1]) or \
           (entry[2] is not None and not callable(entry[2])) or \
           (entry[3] is not None and not callable(entry[3])) or \
           (len(entry) > 4 and entry[4] is not None and not callable(entry[4])) or \
           (len(entry) > 5 and entry[5] is not None and not callable(entry[5])):
            raise CodecRegistryError,\
                'incompatible codecs in module "%s" (%s)' % \
                (mod.__name__, mod.__file__)
        if len(entry)<7 or entry[6] is None:
            entry += (None,)*(6-len(entry)) + (mod.__name__.split(".", 1)[1],)
        entry = codecs.CodecInfo(*entry)

    # Cache the codec registry entry
    _cache[encoding] = entry

    # Register its aliases (without overwriting previously registered
    # aliases)
    try:
        codecaliases = mod.getaliases()
    except AttributeError:
        pass
    else:
        for alias in codecaliases:
            if not _aliases.has_key(alias):
                _aliases[alias] = modname

    # Return the registry entry
    return entry

# Register the search_function in the Python codec registry
codecs.register(search_function)
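
Once codecs.register() runs at import time, a plain codecs.lookup() call ends up exercising search_function() above; a small editorial check of that path:

# Editorial usage sketch -- not part of the packaged encodings/__init__.py.
import codecs

info = codecs.lookup('ASCII')        # normalized to 'ascii' -> encodings.ascii
assert info.name == 'ascii'
assert info.encode(u'icarus')[0] == 'icarus'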
BIN
icarus-miner/data/usr/lib/python2.6/encodings/__init__.pyc
Normal file
Binary file not shown.
522
icarus-miner/data/usr/lib/python2.6/encodings/aliases.py
Normal file
@ -0,0 +1,522 @@
""" Encoding Aliases Support

    This module is used by the encodings package search function to
    map encoding names to module names.

    Note that the search function normalizes the encoding names before
    doing the lookup, so the mapping will have to map normalized
    encoding names to module names.

    Contents:

        The following aliases dictionary contains mappings of all IANA
        character set names for which the Python core library provides
        codecs. In addition to these, a few Python specific codec
        aliases have also been added.

"""
aliases = {

    # Please keep this list sorted alphabetically by value !

    # ascii codec
    '646'                : 'ascii',
    'ansi_x3.4_1968'     : 'ascii',
    'ansi_x3_4_1968'     : 'ascii', # some email headers use this non-standard name
    'ansi_x3.4_1986'     : 'ascii',
    'cp367'              : 'ascii',
    'csascii'            : 'ascii',
    'ibm367'             : 'ascii',
    'iso646_us'          : 'ascii',
    'iso_646.irv_1991'   : 'ascii',
    'iso_ir_6'           : 'ascii',
    'us'                 : 'ascii',
    'us_ascii'           : 'ascii',

    # base64_codec codec
    'base64'             : 'base64_codec',
    'base_64'            : 'base64_codec',

    # big5 codec
    'big5_tw'            : 'big5',
    'csbig5'             : 'big5',

    # big5hkscs codec
    'big5_hkscs'         : 'big5hkscs',
    'hkscs'              : 'big5hkscs',

    # bz2_codec codec
    'bz2'                : 'bz2_codec',

    # cp037 codec
    '037'                : 'cp037',
    'csibm037'           : 'cp037',
    'ebcdic_cp_ca'       : 'cp037',
    'ebcdic_cp_nl'       : 'cp037',
    'ebcdic_cp_us'       : 'cp037',
    'ebcdic_cp_wt'       : 'cp037',
    'ibm037'             : 'cp037',
    'ibm039'             : 'cp037',

    # cp1026 codec
    '1026'               : 'cp1026',
    'csibm1026'          : 'cp1026',
    'ibm1026'            : 'cp1026',

    # cp1140 codec
    '1140'               : 'cp1140',
    'ibm1140'            : 'cp1140',

    # cp1250 codec
    '1250'               : 'cp1250',
    'windows_1250'       : 'cp1250',

    # cp1251 codec
    '1251'               : 'cp1251',
    'windows_1251'       : 'cp1251',

    # cp1252 codec
    '1252'               : 'cp1252',
    'windows_1252'       : 'cp1252',

    # cp1253 codec
    '1253'               : 'cp1253',
    'windows_1253'       : 'cp1253',

    # cp1254 codec
    '1254'               : 'cp1254',
    'windows_1254'       : 'cp1254',

    # cp1255 codec
    '1255'               : 'cp1255',
    'windows_1255'       : 'cp1255',

    # cp1256 codec
    '1256'               : 'cp1256',
    'windows_1256'       : 'cp1256',

    # cp1257 codec
    '1257'               : 'cp1257',
    'windows_1257'       : 'cp1257',

    # cp1258 codec
    '1258'               : 'cp1258',
    'windows_1258'       : 'cp1258',

    # cp424 codec
    '424'                : 'cp424',
    'csibm424'           : 'cp424',
    'ebcdic_cp_he'       : 'cp424',
    'ibm424'             : 'cp424',

    # cp437 codec
    '437'                : 'cp437',
    'cspc8codepage437'   : 'cp437',
    'ibm437'             : 'cp437',

    # cp500 codec
    '500'                : 'cp500',
    'csibm500'           : 'cp500',
    'ebcdic_cp_be'       : 'cp500',
    'ebcdic_cp_ch'       : 'cp500',
    'ibm500'             : 'cp500',

    # cp775 codec
    '775'                : 'cp775',
    'cspc775baltic'      : 'cp775',
    'ibm775'             : 'cp775',

    # cp850 codec
    '850'                : 'cp850',
    'cspc850multilingual' : 'cp850',
    'ibm850'             : 'cp850',

    # cp852 codec
    '852'                : 'cp852',
    'cspcp852'           : 'cp852',
    'ibm852'             : 'cp852',

    # cp855 codec
    '855'                : 'cp855',
    'csibm855'           : 'cp855',
    'ibm855'             : 'cp855',

    # cp857 codec
    '857'                : 'cp857',
    'csibm857'           : 'cp857',
    'ibm857'             : 'cp857',

    # cp860 codec
    '860'                : 'cp860',
    'csibm860'           : 'cp860',
    'ibm860'             : 'cp860',

    # cp861 codec
    '861'                : 'cp861',
    'cp_is'              : 'cp861',
    'csibm861'           : 'cp861',
    'ibm861'             : 'cp861',

    # cp862 codec
    '862'                : 'cp862',
    'cspc862latinhebrew' : 'cp862',
    'ibm862'             : 'cp862',

    # cp863 codec
    '863'                : 'cp863',
    'csibm863'           : 'cp863',
    'ibm863'             : 'cp863',

    # cp864 codec
    '864'                : 'cp864',
    'csibm864'           : 'cp864',
    'ibm864'             : 'cp864',

    # cp865 codec
    '865'                : 'cp865',
    'csibm865'           : 'cp865',
    'ibm865'             : 'cp865',

    # cp866 codec
    '866'                : 'cp866',
    'csibm866'           : 'cp866',
    'ibm866'             : 'cp866',

    # cp869 codec
    '869'                : 'cp869',
    'cp_gr'              : 'cp869',
    'csibm869'           : 'cp869',
    'ibm869'             : 'cp869',

    # cp932 codec
    '932'                : 'cp932',
    'ms932'              : 'cp932',
    'mskanji'            : 'cp932',
    'ms_kanji'           : 'cp932',

    # cp949 codec
    '949'                : 'cp949',
    'ms949'              : 'cp949',
    'uhc'                : 'cp949',

    # cp950 codec
    '950'                : 'cp950',
    'ms950'              : 'cp950',

    # euc_jis_2004 codec
    'jisx0213'           : 'euc_jis_2004',
    'eucjis2004'         : 'euc_jis_2004',
    'euc_jis2004'        : 'euc_jis_2004',

    # euc_jisx0213 codec
    'eucjisx0213'        : 'euc_jisx0213',

    # euc_jp codec
    'eucjp'              : 'euc_jp',
    'ujis'               : 'euc_jp',
    'u_jis'              : 'euc_jp',

    # euc_kr codec
    'euckr'              : 'euc_kr',
    'korean'             : 'euc_kr',
    'ksc5601'            : 'euc_kr',
    'ks_c_5601'          : 'euc_kr',
    'ks_c_5601_1987'     : 'euc_kr',
    'ksx1001'            : 'euc_kr',
    'ks_x_1001'          : 'euc_kr',

    # gb18030 codec
    'gb18030_2000'       : 'gb18030',

    # gb2312 codec
    'chinese'            : 'gb2312',
    'csiso58gb231280'    : 'gb2312',
    'euc_cn'             : 'gb2312',
    'euccn'              : 'gb2312',
    'eucgb2312_cn'       : 'gb2312',
    'gb2312_1980'        : 'gb2312',
    'gb2312_80'          : 'gb2312',
    'iso_ir_58'          : 'gb2312',

    # gbk codec
    '936'                : 'gbk',
    'cp936'              : 'gbk',
    'ms936'              : 'gbk',

    # hex_codec codec
    'hex'                : 'hex_codec',

    # hp_roman8 codec
    'roman8'             : 'hp_roman8',
    'r8'                 : 'hp_roman8',
    'csHPRoman8'         : 'hp_roman8',

    # hz codec
    'hzgb'               : 'hz',
    'hz_gb'              : 'hz',
    'hz_gb_2312'         : 'hz',

    # iso2022_jp codec
    'csiso2022jp'        : 'iso2022_jp',
    'iso2022jp'          : 'iso2022_jp',
    'iso_2022_jp'        : 'iso2022_jp',

    # iso2022_jp_1 codec
    'iso2022jp_1'        : 'iso2022_jp_1',
    'iso_2022_jp_1'      : 'iso2022_jp_1',

    # iso2022_jp_2 codec
    'iso2022jp_2'        : 'iso2022_jp_2',
    'iso_2022_jp_2'      : 'iso2022_jp_2',

    # iso2022_jp_2004 codec
    'iso_2022_jp_2004'   : 'iso2022_jp_2004',
    'iso2022jp_2004'     : 'iso2022_jp_2004',

    # iso2022_jp_3 codec
    'iso2022jp_3'        : 'iso2022_jp_3',
    'iso_2022_jp_3'      : 'iso2022_jp_3',

    # iso2022_jp_ext codec
    'iso2022jp_ext'      : 'iso2022_jp_ext',
    'iso_2022_jp_ext'    : 'iso2022_jp_ext',

    # iso2022_kr codec
    'csiso2022kr'        : 'iso2022_kr',
    'iso2022kr'          : 'iso2022_kr',
    'iso_2022_kr'        : 'iso2022_kr',

    # iso8859_10 codec
    'csisolatin6'        : 'iso8859_10',
    'iso_8859_10'        : 'iso8859_10',
    'iso_8859_10_1992'   : 'iso8859_10',
    'iso_ir_157'         : 'iso8859_10',
    'l6'                 : 'iso8859_10',
    'latin6'             : 'iso8859_10',

    # iso8859_11 codec
    'thai'               : 'iso8859_11',
    'iso_8859_11'        : 'iso8859_11',
    'iso_8859_11_2001'   : 'iso8859_11',

    # iso8859_13 codec
    'iso_8859_13'        : 'iso8859_13',
    'l7'                 : 'iso8859_13',
    'latin7'             : 'iso8859_13',

    # iso8859_14 codec
    'iso_8859_14'        : 'iso8859_14',
    'iso_8859_14_1998'   : 'iso8859_14',
    'iso_celtic'         : 'iso8859_14',
    'iso_ir_199'         : 'iso8859_14',
    'l8'                 : 'iso8859_14',
    'latin8'             : 'iso8859_14',

    # iso8859_15 codec
    'iso_8859_15'        : 'iso8859_15',
    'l9'                 : 'iso8859_15',
    'latin9'             : 'iso8859_15',

    # iso8859_16 codec
    'iso_8859_16'        : 'iso8859_16',
    'iso_8859_16_2001'   : 'iso8859_16',
    'iso_ir_226'         : 'iso8859_16',
    'l10'                : 'iso8859_16',
    'latin10'            : 'iso8859_16',

    # iso8859_2 codec
    'csisolatin2'        : 'iso8859_2',
    'iso_8859_2'         : 'iso8859_2',
    'iso_8859_2_1987'    : 'iso8859_2',
    'iso_ir_101'         : 'iso8859_2',
    'l2'                 : 'iso8859_2',
    'latin2'             : 'iso8859_2',

    # iso8859_3 codec
    'csisolatin3'        : 'iso8859_3',
    'iso_8859_3'         : 'iso8859_3',
    'iso_8859_3_1988'    : 'iso8859_3',
    'iso_ir_109'         : 'iso8859_3',
    'l3'                 : 'iso8859_3',
    'latin3'             : 'iso8859_3',

    # iso8859_4 codec
    'csisolatin4'        : 'iso8859_4',
    'iso_8859_4'         : 'iso8859_4',
    'iso_8859_4_1988'    : 'iso8859_4',
    'iso_ir_110'         : 'iso8859_4',
    'l4'                 : 'iso8859_4',
    'latin4'             : 'iso8859_4',

    # iso8859_5 codec
    'csisolatincyrillic' : 'iso8859_5',
    'cyrillic'           : 'iso8859_5',
    'iso_8859_5'         : 'iso8859_5',
    'iso_8859_5_1988'    : 'iso8859_5',
    'iso_ir_144'         : 'iso8859_5',

    # iso8859_6 codec
    'arabic'             : 'iso8859_6',
    'asmo_708'           : 'iso8859_6',
    'csisolatinarabic'   : 'iso8859_6',
    'ecma_114'           : 'iso8859_6',
    'iso_8859_6'         : 'iso8859_6',
    'iso_8859_6_1987'    : 'iso8859_6',
    'iso_ir_127'         : 'iso8859_6',

    # iso8859_7 codec
    'csisolatingreek'    : 'iso8859_7',
    'ecma_118'           : 'iso8859_7',
    'elot_928'           : 'iso8859_7',
    'greek'              : 'iso8859_7',
    'greek8'             : 'iso8859_7',
    'iso_8859_7'         : 'iso8859_7',
    'iso_8859_7_1987'    : 'iso8859_7',
    'iso_ir_126'         : 'iso8859_7',

    # iso8859_8 codec
    'csisolatinhebrew'   : 'iso8859_8',
    'hebrew'             : 'iso8859_8',
    'iso_8859_8'         : 'iso8859_8',
    'iso_8859_8_1988'    : 'iso8859_8',
    'iso_ir_138'         : 'iso8859_8',

    # iso8859_9 codec
    'csisolatin5'        : 'iso8859_9',
    'iso_8859_9'         : 'iso8859_9',
    'iso_8859_9_1989'    : 'iso8859_9',
    'iso_ir_148'         : 'iso8859_9',
    'l5'                 : 'iso8859_9',
    'latin5'             : 'iso8859_9',

    # johab codec
    'cp1361'             : 'johab',
    'ms1361'             : 'johab',

    # koi8_r codec
    'cskoi8r'            : 'koi8_r',

    # latin_1 codec
    #
    # Note that the latin_1 codec is implemented internally in C and a
    # lot faster than the charmap codec iso8859_1 which uses the same
    # encoding. This is why we discourage the use of the iso8859_1
    # codec and alias it to latin_1 instead.
    #
    '8859'               : 'latin_1',
    'cp819'              : 'latin_1',
    'csisolatin1'        : 'latin_1',
    'ibm819'             : 'latin_1',
    'iso8859'            : 'latin_1',
    'iso8859_1'          : 'latin_1',
    'iso_8859_1'         : 'latin_1',
    'iso_8859_1_1987'    : 'latin_1',
    'iso_ir_100'         : 'latin_1',
    'l1'                 : 'latin_1',
    'latin'              : 'latin_1',
    'latin1'             : 'latin_1',

    # mac_cyrillic codec
    'maccyrillic'        : 'mac_cyrillic',

    # mac_greek codec
    'macgreek'           : 'mac_greek',

    # mac_iceland codec
    'maciceland'         : 'mac_iceland',

    # mac_latin2 codec
    'maccentraleurope'   : 'mac_latin2',
    'maclatin2'          : 'mac_latin2',

    # mac_roman codec
    'macroman'           : 'mac_roman',

    # mac_turkish codec
    'macturkish'         : 'mac_turkish',

    # mbcs codec
    'dbcs'               : 'mbcs',

    # ptcp154 codec
    'csptcp154'          : 'ptcp154',
    'pt154'              : 'ptcp154',
    'cp154'              : 'ptcp154',
    'cyrillic-asian'     : 'ptcp154',

    # quopri_codec codec
    'quopri'             : 'quopri_codec',
    'quoted_printable'   : 'quopri_codec',
    'quotedprintable'    : 'quopri_codec',

    # rot_13 codec
    'rot13'              : 'rot_13',

    # shift_jis codec
    'csshiftjis'         : 'shift_jis',
    'shiftjis'           : 'shift_jis',
    'sjis'               : 'shift_jis',
    's_jis'              : 'shift_jis',

    # shift_jis_2004 codec
    'shiftjis2004'       : 'shift_jis_2004',
    'sjis_2004'          : 'shift_jis_2004',
    's_jis_2004'         : 'shift_jis_2004',

    # shift_jisx0213 codec
    'shiftjisx0213'      : 'shift_jisx0213',
    'sjisx0213'          : 'shift_jisx0213',
    's_jisx0213'         : 'shift_jisx0213',

    # tactis codec
    'tis260'             : 'tactis',

    # tis_620 codec
    'tis620'             : 'tis_620',
    'tis_620_0'          : 'tis_620',
    'tis_620_2529_0'     : 'tis_620',
    'tis_620_2529_1'     : 'tis_620',
    'iso_ir_166'         : 'tis_620',

    # utf_16 codec
    'u16'                : 'utf_16',
    'utf16'              : 'utf_16',

    # utf_16_be codec
    'unicodebigunmarked' : 'utf_16_be',
    'utf_16be'           : 'utf_16_be',

    # utf_16_le codec
    'unicodelittleunmarked' : 'utf_16_le',
    'utf_16le'           : 'utf_16_le',

    # utf_32 codec
    'u32'                : 'utf_32',
    'utf32'              : 'utf_32',

    # utf_32_be codec
    'utf_32be'           : 'utf_32_be',

    # utf_32_le codec
    'utf_32le'           : 'utf_32_le',

    # utf_7 codec
    'u7'                 : 'utf_7',
    'utf7'               : 'utf_7',
    'unicode_1_1_utf_7'  : 'utf_7',

    # utf_8 codec
    'u8'                 : 'utf_8',
    'utf'                : 'utf_8',
    'utf8'               : 'utf_8',
    'utf8_ucs2'          : 'utf_8',
    'utf8_ucs4'          : 'utf_8',

    # uu_codec codec
    'uu'                 : 'uu_codec',

    # zlib_codec codec
    'zip'                : 'zlib_codec',
    'zlib'               : 'zlib_codec',

}
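
Every key in the table above is already normalized, so any spelling that normalizes to a key resolves through it; a quick editorial check:

# Editorial usage sketch -- not part of the packaged aliases.py.
from encodings.aliases import aliases

assert aliases['latin1'] == 'latin_1'        # value names the codec module
assert aliases['u8'] == 'utf_8'
assert u'\xe9'.encode('LATIN-1') == '\xe9'   # 'LATIN-1' resolves via this table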
BIN
icarus-miner/data/usr/lib/python2.6/encodings/aliases.pyc
Normal file
Binary file not shown.
50
icarus-miner/data/usr/lib/python2.6/encodings/ascii.py
Normal file
@ -0,0 +1,50 @@
""" Python 'ascii' Codec


Written by Marc-Andre Lemburg (mal@lemburg.com).

(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.

"""
import codecs

### Codec APIs

class Codec(codecs.Codec):

    # Note: Binding these as C functions will result in the class not
    # converting them to methods. This is intended.
    encode = codecs.ascii_encode
    decode = codecs.ascii_decode

class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        return codecs.ascii_encode(input, self.errors)[0]

class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        return codecs.ascii_decode(input, self.errors)[0]

class StreamWriter(Codec,codecs.StreamWriter):
    pass

class StreamReader(Codec,codecs.StreamReader):
    pass

class StreamConverter(StreamWriter,StreamReader):

    encode = codecs.ascii_decode
    decode = codecs.ascii_encode

### encodings module API

def getregentry():
    return codecs.CodecInfo(
        name='ascii',
        encode=Codec.encode,
        decode=Codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamwriter=StreamWriter,
        streamreader=StreamReader,
    )
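
A short editorial check of the incremental interface defined above, including the strict-errors default:

# Editorial usage sketch -- not part of the packaged ascii.py.
import codecs

enc = codecs.getincrementalencoder('ascii')()   # errors='strict' by default
assert enc.encode(u'abc') == 'abc'
try:
    enc.encode(u'\xe9')                         # non-ASCII input must raise
    raise AssertionError('expected UnicodeEncodeError')
except UnicodeEncodeError:
    pass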
BIN
icarus-miner/data/usr/lib/python2.6/encodings/ascii.pyc
Normal file
Binary file not shown.
79
icarus-miner/data/usr/lib/python2.6/encodings/hex_codec.py
Normal file
@ -0,0 +1,79 @@
""" Python 'hex_codec' Codec - 2-digit hex content transfer encoding

    Unlike most of the other codecs which target Unicode, this codec
    will return Python string objects for both encode and decode.

    Written by Marc-Andre Lemburg (mal@lemburg.com).

"""
import codecs, binascii

### Codec APIs

def hex_encode(input,errors='strict'):

    """ Encodes the object input and returns a tuple (output
        object, length consumed).

        errors defines the error handling to apply. It defaults to
        'strict' handling which is the only currently supported
        error handling for this codec.

    """
    assert errors == 'strict'
    output = binascii.b2a_hex(input)
    return (output, len(input))

def hex_decode(input,errors='strict'):

    """ Decodes the object input and returns a tuple (output
        object, length consumed).

        input must be an object which provides the bf_getreadbuf
        buffer slot. Python strings, buffer objects and memory
        mapped files are examples of objects providing this slot.

        errors defines the error handling to apply. It defaults to
        'strict' handling which is the only currently supported
        error handling for this codec.

    """
    assert errors == 'strict'
    output = binascii.a2b_hex(input)
    return (output, len(input))

class Codec(codecs.Codec):

    def encode(self, input,errors='strict'):
        return hex_encode(input,errors)
    def decode(self, input,errors='strict'):
        return hex_decode(input,errors)

class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        assert self.errors == 'strict'
        return binascii.b2a_hex(input)

class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        assert self.errors == 'strict'
        return binascii.a2b_hex(input)

class StreamWriter(Codec,codecs.StreamWriter):
    pass

class StreamReader(Codec,codecs.StreamReader):
    pass

### encodings module API

def getregentry():
    return codecs.CodecInfo(
        name='hex',
        encode=hex_encode,
        decode=hex_decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamwriter=StreamWriter,
        streamreader=StreamReader,
    )
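
A short editorial check of the codec above; converting binary work buffers to and from hex strings is presumably why this module is bundled with the miner:

# Editorial usage sketch -- not part of the packaged hex_codec.py.
data = '\x00\xff\x10'
assert data.encode('hex') == '00ff10'    # str.encode routes through hex_codec
assert '00ff10'.decode('hex') == data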
BIN
icarus-miner/data/usr/lib/python2.6/encodings/hex_codec.pyc
Normal file
Binary file not shown.
51
icarus-miner/data/usr/lib/python2.6/functools.py
Normal file
@ -0,0 +1,51 @@
"""functools.py - Tools for working with functions and callable objects
"""
# Python module wrapper for _functools C module
# to allow utilities written in Python to be added
# to the functools module.
# Written by Nick Coghlan <ncoghlan at gmail.com>
#   Copyright (C) 2006 Python Software Foundation.
# See C source code for _functools credits/copyright

from _functools import partial, reduce

# update_wrapper() and wraps() are tools to help write
# wrapper functions that can handle naive introspection

WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__doc__')
WRAPPER_UPDATES = ('__dict__',)
def update_wrapper(wrapper,
                   wrapped,
                   assigned = WRAPPER_ASSIGNMENTS,
                   updated = WRAPPER_UPDATES):
    """Update a wrapper function to look like the wrapped function

       wrapper is the function to be updated
       wrapped is the original function
       assigned is a tuple naming the attributes assigned directly
       from the wrapped function to the wrapper function (defaults to
       functools.WRAPPER_ASSIGNMENTS)
       updated is a tuple naming the attributes of the wrapper that
       are updated with the corresponding attribute from the wrapped
       function (defaults to functools.WRAPPER_UPDATES)
    """
    for attr in assigned:
        setattr(wrapper, attr, getattr(wrapped, attr))
    for attr in updated:
        getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
    # Return the wrapper so this can be used as a decorator via partial()
    return wrapper

def wraps(wrapped,
          assigned = WRAPPER_ASSIGNMENTS,
          updated = WRAPPER_UPDATES):
    """Decorator factory to apply update_wrapper() to a wrapper function

       Returns a decorator that invokes update_wrapper() with the decorated
       function as the wrapper argument and the arguments to wraps() as the
       remaining arguments. Default arguments are as for update_wrapper().
       This is a convenience function to simplify applying partial() to
       update_wrapper().
    """
    return partial(update_wrapper, wrapped=wrapped,
                   assigned=assigned, updated=updated)
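
wraps() above is the usual way to keep a decorated function's metadata intact; an editorial sketch:

# Editorial usage sketch -- not part of the packaged functools.py.
from functools import wraps

def logged(func):
    @wraps(func)                     # copies __name__ and __doc__ onto wrapper
    def wrapper(*args, **kwds):
        print 'calling', func.__name__
        return func(*args, **kwds)
    return wrapper

@logged
def add(a, b):
    'Add two numbers.'
    return a + b

assert add.__name__ == 'add' and add.__doc__ == 'Add two numbers.'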
BIN
icarus-miner/data/usr/lib/python2.6/functools.pyc
Normal file
Binary file not shown.
105
icarus-miner/data/usr/lib/python2.6/genericpath.py
Normal file
@ -0,0 +1,105 @@
"""
Path operations common to more than one OS
Do not use directly.  The OS specific modules import the appropriate
functions from this module themselves.
"""
import os
import stat

__all__ = ['commonprefix', 'exists', 'getatime', 'getctime', 'getmtime',
           'getsize', 'isdir', 'isfile']


# Does a path exist?
# This is false for dangling symbolic links on systems that support them.
def exists(path):
    """Test whether a path exists.  Returns False for broken symbolic links"""
    try:
        st = os.stat(path)
    except os.error:
        return False
    return True


# This follows symbolic links, so both islink() and isfile() can be true
# for the same path on systems that support symlinks
def isfile(path):
    """Test whether a path is a regular file"""
    try:
        st = os.stat(path)
    except os.error:
        return False
    return stat.S_ISREG(st.st_mode)


# Is a path a directory?
# This follows symbolic links, so both islink() and isdir()
# can be true for the same path on systems that support symlinks
def isdir(s):
    """Return true if the pathname refers to an existing directory."""
    try:
        st = os.stat(s)
    except os.error:
        return False
    return stat.S_ISDIR(st.st_mode)


def getsize(filename):
    """Return the size of a file, reported by os.stat()."""
    return os.stat(filename).st_size


def getmtime(filename):
    """Return the last modification time of a file, reported by os.stat()."""
    return os.stat(filename).st_mtime


def getatime(filename):
    """Return the last access time of a file, reported by os.stat()."""
    return os.stat(filename).st_atime


def getctime(filename):
    """Return the metadata change time of a file, reported by os.stat()."""
    return os.stat(filename).st_ctime


# Return the longest prefix of all list elements.
def commonprefix(m):
    "Given a list of pathnames, returns the longest common leading component"
    if not m: return ''
    s1 = min(m)
    s2 = max(m)
    for i, c in enumerate(s1):
        if c != s2[i]:
            return s1[:i]
    return s1

# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.

# Generic implementation of splitext, to be parametrized with
# the separators
def _splitext(p, sep, altsep, extsep):
    """Split the extension from a pathname.

    Extension is everything from the last dot to the end, ignoring
    leading dots.  Returns "(root, ext)"; ext may be empty."""

    sepIndex = p.rfind(sep)
    if altsep:
        altsepIndex = p.rfind(altsep)
        sepIndex = max(sepIndex, altsepIndex)

    dotIndex = p.rfind(extsep)
    if dotIndex > sepIndex:
        # skip all leading dots
        filenameIndex = sepIndex + 1
        while filenameIndex < dotIndex:
            if p[filenameIndex] != extsep:
                return p[:dotIndex], p[dotIndex:]
            filenameIndex += 1

    return p, ''
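
Two behaviours worth noting in the code above: commonprefix() compares character-wise, so its result need not end on a path separator, and _splitext() skips leading dots so dotfiles keep their names. An editorial check:

# Editorial usage sketch -- not part of the packaged genericpath.py.
import genericpath

assert genericpath.commonprefix(['/usr/lib', '/usr/local']) == '/usr/l'
assert genericpath._splitext('archive.tar.gz', '/', None, '.') == \
       ('archive.tar', '.gz')
assert genericpath._splitext('.bashrc', '/', None, '.') == ('.bashrc', '')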
BIN
icarus-miner/data/usr/lib/python2.6/genericpath.pyc
Normal file
Binary file not shown.
393
icarus-miner/data/usr/lib/python2.6/heapq.py
Normal file
@ -0,0 +1,393 @@
# -*- coding: Latin-1 -*-

"""Heap queue algorithm (a.k.a. priority queue).

Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0.  For the sake of comparison,
non-existing elements are considered to be infinite.  The interesting
property of a heap is that a[0] is always its smallest element.

Usage:

heap = []            # creates an empty heap
heappush(heap, item) # pushes a new item on the heap
item = heappop(heap) # pops the smallest item from the heap
item = heap[0]       # smallest item on the heap without popping it
heapify(x)           # transforms list into a heap, in-place, in linear time
item = heapreplace(heap, item) # pops and returns smallest item, and adds
                               # new item; the heap size is unchanged

Our API differs from textbook heap algorithms as follows:

- We use 0-based indexing.  This makes the relationship between the
  index for a node and the indexes for its children slightly less
  obvious, but is more suitable since Python uses 0-based indexing.

- Our heappop() method returns the smallest item, not the largest.

These two make it possible to view the heap as a regular Python list
without surprises: heap[0] is the smallest item, and heap.sort()
maintains the heap invariant!
"""

# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger

__about__ = """Heap queues

[explanation by François Pinard]

Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0.  For the sake of comparison,
non-existing elements are considered to be infinite.  The interesting
property of a heap is that a[0] is always its smallest element.

The strange invariant above is meant to be an efficient memory
representation for a tournament.  The numbers below are `k', not a[k]:

                                   0

                  1                                 2

          3               4                5               6

      7       8       9       10      11      12      13      14

    15 16   17 18   19 20   21 22   23 24   25 26   27 28   29 30


In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'.  In
a usual binary tournament we see in sports, each cell is the winner
over the two cells it tops, and we can trace the winner down the tree
to see all opponents s/he had.  However, in many computer applications
of such tournaments, we do not need to trace the history of a winner.
To be more memory efficient, when a winner is promoted, we try to
replace it by something else at a lower level, and the rule becomes
that a cell and the two cells it tops contain three different items,
but the top cell "wins" over the two topped cells.

If this heap invariant is protected at all times, index 0 is clearly
the overall winner.  The simplest algorithmic way to remove it and
find the "next" winner is to move some loser (let's say cell 30 in the
diagram above) into the 0 position, and then percolate this new 0 down
the tree, exchanging values, until the invariant is re-established.
This is clearly logarithmic on the total number of items in the tree.
By iterating over all items, you get an O(n ln n) sort.

A nice feature of this sort is that you can efficiently insert new
items while the sort is going on, provided that the inserted items are
not "better" than the last 0'th element you extracted.  This is
especially useful in simulation contexts, where the tree holds all
incoming events, and the "win" condition means the smallest scheduled
time.  When an event schedules other events for execution, they are
scheduled into the future, so they can easily go into the heap.  So, a
heap is a good structure for implementing schedulers (this is what I
used for my MIDI sequencer :-).

Various structures for implementing schedulers have been extensively
studied, and heaps are good for this, as they are reasonably speedy,
the speed is almost constant, and the worst case is not much different
than the average case.  However, there are other representations which
are more efficient overall, yet the worst cases might be terrible.

Heaps are also very useful in big disk sorts.  You most probably all
know that a big sort implies producing "runs" (which are pre-sorted
sequences, whose size is usually related to the amount of CPU memory),
followed by a merging pass for these runs, which merging is often
very cleverly organised[1].  It is very important that the initial
sort produces the longest runs possible.  Tournaments are a good way
to achieve that.  If, using all the memory available to hold a
tournament, you replace and percolate items that happen to fit the
current run, you'll produce runs which are twice the size of the
memory for random input, and much better for input fuzzily ordered.

Moreover, if you output the 0'th item on disk and get an input which
may not fit in the current tournament (because the value "wins" over
the last output value), it cannot fit in the heap, so the size of the
heap decreases.  The freed memory could be cleverly reused immediately
for progressively building a second heap, which grows at exactly the
same rate the first heap is melting.  When the first heap completely
vanishes, you switch heaps and start a new run.  Clever and quite
effective!

In a word, heaps are useful memory structures to know.  I use them in
a few applications, and I think it is good to keep a `heap' module
around. :-)

--------------------
[1] The disk balancing algorithms which are current, nowadays, are
more annoying than clever, and this is a consequence of the seeking
capabilities of the disks.  On devices which cannot seek, like big
tape drives, the story was quite different, and one had to be very
clever to ensure (far in advance) that each tape movement will be the
most effective possible (that is, will best participate at
"progressing" the merge).  Some tapes were even able to read
backwards, and this was also used to avoid the rewinding time.
Believe me, real good tape sorts were quite spectacular to watch!
From all times, sorting has always been a Great Art! :-)
"""

__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge',
           'nlargest', 'nsmallest', 'heappushpop']

from itertools import islice, repeat, count, imap, izip, tee
from operator import itemgetter, neg
import bisect

def heappush(heap, item):
    """Push item onto heap, maintaining the heap invariant."""
    heap.append(item)
    _siftdown(heap, 0, len(heap)-1)

def heappop(heap):
    """Pop the smallest item off the heap, maintaining the heap invariant."""
    lastelt = heap.pop()    # raises appropriate IndexError if heap is empty
    if heap:
        returnitem = heap[0]
        heap[0] = lastelt
        _siftup(heap, 0)
    else:
        returnitem = lastelt
    return returnitem

def heapreplace(heap, item):
    """Pop and return the current smallest value, and add the new item.

    This is more efficient than heappop() followed by heappush(), and can be
    more appropriate when using a fixed-size heap.  Note that the value
    returned may be larger than item!  That constrains reasonable uses of
    this routine unless written as part of a conditional replacement:

        if item > heap[0]:
            item = heapreplace(heap, item)
    """
    returnitem = heap[0]    # raises appropriate IndexError if heap is empty
    heap[0] = item
    _siftup(heap, 0)
    return returnitem

def heappushpop(heap, item):
    """Fast version of a heappush followed by a heappop."""
    if heap and heap[0] < item:
        item, heap[0] = heap[0], item
        _siftup(heap, 0)
    return item

def heapify(x):
    """Transform list into a heap, in-place, in O(len(heap)) time."""
    n = len(x)
    # Transform bottom-up.  The largest index there's any point to looking at
    # is the largest with a child index in-range, so must have 2*i + 1 < n,
    # or i < (n-1)/2.  If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so
    # j-1 is the largest, which is n//2 - 1.  If n is odd = 2*j+1, this is
    # (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1.
    for i in reversed(xrange(n//2)):
        _siftup(x, i)

def nlargest(n, iterable):
    """Find the n largest elements in a dataset.

    Equivalent to:  sorted(iterable, reverse=True)[:n]
    """
    it = iter(iterable)
    result = list(islice(it, n))
    if not result:
        return result
    heapify(result)
    _heappushpop = heappushpop
    for elem in it:
        _heappushpop(result, elem)
    result.sort(reverse=True)
    return result

def nsmallest(n, iterable):
    """Find the n smallest elements in a dataset.

    Equivalent to:  sorted(iterable)[:n]
    """
    if hasattr(iterable, '__len__') and n * 10 <= len(iterable):
        # For smaller values of n, the bisect method is faster than a minheap.
        # It is also memory efficient, consuming only n elements of space.
        it = iter(iterable)
        result = sorted(islice(it, 0, n))
        if not result:
            return result
        insort = bisect.insort
        pop = result.pop
        los = result[-1]    # los --> Largest of the nsmallest
        for elem in it:
            if los <= elem:
                continue
            insort(result, elem)
            pop()
            los = result[-1]
        return result
    # An alternative approach manifests the whole iterable in memory but
    # saves comparisons by heapifying all at once.  Also, saves time
    # over bisect.insort() which has O(n) data movement time for every
    # insertion.  Finding the n smallest of an m length iterable requires
    # O(m) + O(n log m) comparisons.
    h = list(iterable)
    heapify(h)
    return map(heappop, repeat(h, min(n, len(h))))
|
|
||||||
|
# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos
|
||||||
|
# is the index of a leaf with a possibly out-of-order value. Restore the
|
||||||
|
# heap invariant.
|
||||||
|
def _siftdown(heap, startpos, pos):
|
||||||
|
newitem = heap[pos]
|
||||||
|
# Follow the path to the root, moving parents down until finding a place
|
||||||
|
# newitem fits.
|
||||||
|
while pos > startpos:
|
||||||
|
parentpos = (pos - 1) >> 1
|
||||||
|
parent = heap[parentpos]
|
||||||
|
if newitem < parent:
|
||||||
|
heap[pos] = parent
|
||||||
|
pos = parentpos
|
||||||
|
continue
|
||||||
|
break
|
||||||
|
heap[pos] = newitem
|
||||||
|
|
||||||
|
# The child indices of heap index pos are already heaps, and we want to make
|
||||||
|
# a heap at index pos too. We do this by bubbling the smaller child of
|
||||||
|
# pos up (and so on with that child's children, etc) until hitting a leaf,
|
||||||
|
# then using _siftdown to move the oddball originally at index pos into place.
|
||||||
|
#
|
||||||
|
# We *could* break out of the loop as soon as we find a pos where newitem <=
|
||||||
|
# both its children, but turns out that's not a good idea, and despite that
|
||||||
|
# many books write the algorithm that way. During a heap pop, the last array
|
||||||
|
# element is sifted in, and that tends to be large, so that comparing it
|
||||||
|
# against values starting from the root usually doesn't pay (= usually doesn't
|
||||||
|
# get us out of the loop early). See Knuth, Volume 3, where this is
|
||||||
|
# explained and quantified in an exercise.
|
||||||
|
#
|
||||||
|
# Cutting the # of comparisons is important, since these routines have no
|
||||||
|
# way to extract "the priority" from an array element, so that intelligence
|
||||||
|
# is likely to be hiding in custom __cmp__ methods, or in array elements
|
||||||
|
# storing (priority, record) tuples. Comparisons are thus potentially
|
||||||
|
# expensive.
|
||||||
|
#
|
||||||
|
# On random arrays of length 1000, making this change cut the number of
|
||||||
|
# comparisons made by heapify() a little, and those made by exhaustive
|
||||||
|
# heappop() a lot, in accord with theory. Here are typical results from 3
|
||||||
|
# runs (3 just to demonstrate how small the variance is):
|
||||||
|
#
|
||||||
|
# Compares needed by heapify Compares needed by 1000 heappops
|
||||||
|
# -------------------------- --------------------------------
|
||||||
|
# 1837 cut to 1663 14996 cut to 8680
|
||||||
|
# 1855 cut to 1659 14966 cut to 8678
|
||||||
|
# 1847 cut to 1660 15024 cut to 8703
|
||||||
|
#
|
||||||
|
# Building the heap by using heappush() 1000 times instead required
|
||||||
|
# 2198, 2148, and 2219 compares: heapify() is more efficient, when
|
||||||
|
# you can use it.
|
||||||
|
#
|
||||||
|
# The total compares needed by list.sort() on the same lists were 8627,
|
||||||
|
# 8627, and 8632 (this should be compared to the sum of heapify() and
|
||||||
|
# heappop() compares): list.sort() is (unsurprisingly!) more efficient
|
||||||
|
# for sorting.
|
||||||
|
|
||||||
|
def _siftup(heap, pos):
|
||||||
|
endpos = len(heap)
|
||||||
|
startpos = pos
|
||||||
|
newitem = heap[pos]
|
||||||
|
# Bubble up the smaller child until hitting a leaf.
|
||||||
|
childpos = 2*pos + 1 # leftmost child position
|
||||||
|
while childpos < endpos:
|
||||||
|
# Set childpos to index of smaller child.
|
||||||
|
rightpos = childpos + 1
|
||||||
|
if rightpos < endpos and not heap[childpos] < heap[rightpos]:
|
||||||
|
childpos = rightpos
|
||||||
|
# Move the smaller child up.
|
||||||
|
heap[pos] = heap[childpos]
|
||||||
|
pos = childpos
|
||||||
|
childpos = 2*pos + 1
|
||||||
|
# The leaf at pos is empty now. Put newitem there, and bubble it up
|
||||||
|
# to its final resting place (by sifting its parents down).
|
||||||
|
heap[pos] = newitem
|
||||||
|
_siftdown(heap, startpos, pos)
|
||||||
|
|
||||||
|
# If available, use C implementation
|
||||||
|
try:
|
||||||
|
from _heapq import heappush, heappop, heapify, heapreplace, nlargest, nsmallest, heappushpop
|
||||||
|
except ImportError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
def merge(*iterables):
|
||||||
|
'''Merge multiple sorted inputs into a single sorted output.
|
||||||
|
|
||||||
|
Similar to sorted(itertools.chain(*iterables)) but returns a generator,
|
||||||
|
does not pull the data into memory all at once, and assumes that each of
|
||||||
|
the input streams is already sorted (smallest to largest).
|
||||||
|
|
||||||
|
>>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25]))
|
||||||
|
[0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25]
|
||||||
|
|
||||||
|
'''
|
||||||
|
_heappop, _heapreplace, _StopIteration = heappop, heapreplace, StopIteration
|
||||||
|
|
||||||
|
h = []
|
||||||
|
h_append = h.append
|
||||||
|
for itnum, it in enumerate(map(iter, iterables)):
|
||||||
|
try:
|
||||||
|
next = it.next
|
||||||
|
h_append([next(), itnum, next])
|
||||||
|
except _StopIteration:
|
||||||
|
pass
|
||||||
|
heapify(h)
|
||||||
|
|
||||||
|
while 1:
|
||||||
|
try:
|
||||||
|
while 1:
|
||||||
|
v, itnum, next = s = h[0] # raises IndexError when h is empty
|
||||||
|
yield v
|
||||||
|
s[0] = next() # raises StopIteration when exhausted
|
||||||
|
_heapreplace(h, s) # restore heap condition
|
||||||
|
except _StopIteration:
|
||||||
|
_heappop(h) # remove empty iterator
|
||||||
|
except IndexError:
|
||||||
|
return
|
||||||
|
|
||||||
|
# Extend the implementations of nsmallest and nlargest to use a key= argument
|
||||||
|
_nsmallest = nsmallest
|
||||||
|
def nsmallest(n, iterable, key=None):
|
||||||
|
"""Find the n smallest elements in a dataset.
|
||||||
|
|
||||||
|
Equivalent to: sorted(iterable, key=key)[:n]
|
||||||
|
"""
|
||||||
|
if key is None:
|
||||||
|
it = izip(iterable, count()) # decorate
|
||||||
|
result = _nsmallest(n, it)
|
||||||
|
return map(itemgetter(0), result) # undecorate
|
||||||
|
in1, in2 = tee(iterable)
|
||||||
|
it = izip(imap(key, in1), count(), in2) # decorate
|
||||||
|
result = _nsmallest(n, it)
|
||||||
|
return map(itemgetter(2), result) # undecorate
|
||||||
|
|
||||||
|
_nlargest = nlargest
|
||||||
|
def nlargest(n, iterable, key=None):
|
||||||
|
"""Find the n largest elements in a dataset.
|
||||||
|
|
||||||
|
Equivalent to: sorted(iterable, key=key, reverse=True)[:n]
|
||||||
|
"""
|
||||||
|
if key is None:
|
||||||
|
it = izip(iterable, imap(neg, count())) # decorate
|
||||||
|
result = _nlargest(n, it)
|
||||||
|
return map(itemgetter(0), result) # undecorate
|
||||||
|
in1, in2 = tee(iterable)
|
||||||
|
it = izip(imap(key, in1), imap(neg, count()), in2) # decorate
|
||||||
|
result = _nlargest(n, it)
|
||||||
|
return map(itemgetter(2), result) # undecorate
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
# Simple sanity test
|
||||||
|
heap = []
|
||||||
|
data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0]
|
||||||
|
for item in data:
|
||||||
|
heappush(heap, item)
|
||||||
|
sort = []
|
||||||
|
while heap:
|
||||||
|
sort.append(heappop(heap))
|
||||||
|
print sort
|
||||||
|
|
||||||
|
import doctest
|
||||||
|
doctest.testmod()
|
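Editor's note: the docstring's run-doubling claim is easy to check. The sketch
below is illustrative only and not a file in this package (the name
replacement_selection and its memory parameter are invented); it implements
tournament/replacement selection on top of this heapq module, and on shuffled
input the average run length comes out near twice the buffer size.

    import heapq
    import random
    from itertools import islice

    _DONE = object()    # unique sentinel: the input iterator is exhausted

    def replacement_selection(iterable, memory):
        """Yield sorted runs using a tournament of at most `memory` items."""
        it = iter(iterable)
        heap = list(islice(it, memory))
        heapq.heapify(heap)
        while heap:
            run = []        # the sorted run being written "to disk"
            frozen = []     # items too small for this run; they seed the next
            while heap:
                smallest = heapq.heappop(heap)
                run.append(smallest)
                item = next(it, _DONE)
                if item is _DONE:
                    continue
                if item < smallest:
                    frozen.append(item)         # loses: saved for the next run
                else:
                    heapq.heappush(heap, item)  # still fits the current run
            yield run
            heap = frozen
            heapq.heapify(heap)

    data = [random.random() for _ in xrange(100000)]
    runs = list(replacement_selection(data, 50))
    print sum(len(r) for r in runs) / float(len(runs))   # roughly 100.0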
BIN
icarus-miner/data/usr/lib/python2.6/heapq.pyc
Normal file
Binary file not shown.
79
icarus-miner/data/usr/lib/python2.6/hex_codec.py
Normal file
@@ -0,0 +1,79 @@
""" Python 'hex_codec' Codec - 2-digit hex content transfer encoding

    Unlike most of the other codecs which target Unicode, this codec
    will return Python string objects for both encode and decode.

    Written by Marc-Andre Lemburg (mal@lemburg.com).

"""
import codecs, binascii

### Codec APIs

def hex_encode(input, errors='strict'):

    """ Encodes the object input and returns a tuple (output
        object, length consumed).

        errors defines the error handling to apply. It defaults to
        'strict' handling which is the only currently supported
        error handling for this codec.

    """
    assert errors == 'strict'
    output = binascii.b2a_hex(input)
    return (output, len(input))

def hex_decode(input, errors='strict'):

    """ Decodes the object input and returns a tuple (output
        object, length consumed).

        input must be an object which provides the bf_getreadbuf
        buffer slot. Python strings, buffer objects and memory
        mapped files are examples of objects providing this slot.

        errors defines the error handling to apply. It defaults to
        'strict' handling which is the only currently supported
        error handling for this codec.

    """
    assert errors == 'strict'
    output = binascii.a2b_hex(input)
    return (output, len(input))

class Codec(codecs.Codec):

    def encode(self, input, errors='strict'):
        return hex_encode(input, errors)
    def decode(self, input, errors='strict'):
        return hex_decode(input, errors)

class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        assert self.errors == 'strict'
        return binascii.b2a_hex(input)

class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        assert self.errors == 'strict'
        return binascii.a2b_hex(input)

class StreamWriter(Codec, codecs.StreamWriter):
    pass

class StreamReader(Codec, codecs.StreamReader):
    pass

### encodings module API

def getregentry():
    return codecs.CodecInfo(
        name='hex',
        encode=hex_encode,
        decode=hex_decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamwriter=StreamWriter,
        streamreader=StreamReader,
    )
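Editor's note: in Python 2 this codec is reached through the ordinary string
API, which is the usual way binary work data gets turned into printable hex
(the values below are illustrative):

    >>> 'abc'.encode('hex')
    '616263'
    >>> '616263'.decode('hex')
    'abc'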
BIN
icarus-miner/data/usr/lib/python2.6/hex_codec.pyc
Normal file
Binary file not shown.
1327
icarus-miner/data/usr/lib/python2.6/httplib.py
Normal file
File diff suppressed because it is too large.
BIN
icarus-miner/data/usr/lib/python2.6/httplib.pyc
Normal file
Binary file not shown.
318
icarus-miner/data/usr/lib/python2.6/json/__init__.py
Normal file
@@ -0,0 +1,318 @@
r"""A simple, fast, extensible JSON encoder and decoder

JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.

json exposes an API familiar to users of the standard library
marshal and pickle modules.

Encoding basic Python object hierarchies::

    >>> import json
    >>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
    '["foo", {"bar": ["baz", null, 1.0, 2]}]'
    >>> print json.dumps("\"foo\bar")
    "\"foo\bar"
    >>> print json.dumps(u'\u1234')
    "\u1234"
    >>> print json.dumps('\\')
    "\\"
    >>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
    {"a": 0, "b": 0, "c": 0}
    >>> from StringIO import StringIO
    >>> io = StringIO()
    >>> json.dump(['streaming API'], io)
    >>> io.getvalue()
    '["streaming API"]'

Compact encoding::

    >>> import json
    >>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
    '[1,2,3,{"4":5,"6":7}]'

Pretty printing (using repr() because of extraneous whitespace in the output)::

    >>> import json
    >>> print repr(json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4))
    '{\n    "4": 5, \n    "6": 7\n}'

Decoding JSON::

    >>> import json
    >>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]')
    [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
    >>> json.loads('"\\"foo\\bar"')
    u'"foo\x08ar'
    >>> from StringIO import StringIO
    >>> io = StringIO('["streaming API"]')
    >>> json.load(io)
    [u'streaming API']

Specializing JSON object decoding::

    >>> import json
    >>> def as_complex(dct):
    ...     if '__complex__' in dct:
    ...         return complex(dct['real'], dct['imag'])
    ...     return dct
    ...
    >>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
    ...     object_hook=as_complex)
    (1+2j)
    >>> import decimal
    >>> json.loads('1.1', parse_float=decimal.Decimal)
    Decimal('1.1')

Extending JSONEncoder::

    >>> import json
    >>> class ComplexEncoder(json.JSONEncoder):
    ...     def default(self, obj):
    ...         if isinstance(obj, complex):
    ...             return [obj.real, obj.imag]
    ...         return json.JSONEncoder.default(self, obj)
    ...
    >>> json.dumps(2 + 1j, cls=ComplexEncoder)
    '[2.0, 1.0]'
    >>> ComplexEncoder().encode(2 + 1j)
    '[2.0, 1.0]'
    >>> list(ComplexEncoder().iterencode(2 + 1j))
    ['[', '2.0', ', ', '1.0', ']']


Using json.tool from the shell to validate and
pretty-print::

    $ echo '{"json":"obj"}' | python -mjson.tool
    {
        "json": "obj"
    }
    $ echo '{ 1.2:3.4}' | python -mjson.tool
    Expecting property name: line 1 column 2 (char 2)

Note that the JSON produced by this module's default settings
is a subset of YAML, so it may be used as a serializer for that as well.

"""

__version__ = '1.9'
__all__ = [
    'dump', 'dumps', 'load', 'loads',
    'JSONDecoder', 'JSONEncoder',
]

__author__ = 'Bob Ippolito <bob@redivi.com>'

from .decoder import JSONDecoder
from .encoder import JSONEncoder

_default_encoder = JSONEncoder(
    skipkeys=False,
    ensure_ascii=True,
    check_circular=True,
    allow_nan=True,
    indent=None,
    separators=None,
    encoding='utf-8',
    default=None,
)

def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, **kw):
    """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
    ``.write()``-supporting file-like object).

    If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
    will be skipped instead of raising a ``TypeError``.

    If ``ensure_ascii`` is ``False``, then some chunks written to ``fp``
    may be ``unicode`` instances, subject to normal Python ``str`` to
    ``unicode`` coercion rules.  Unless ``fp.write()`` explicitly
    understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
    to cause an error.

    If ``check_circular`` is ``False``, then the circular reference check
    for container types will be skipped and a circular reference will
    result in an ``OverflowError`` (or worse).

    If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
    in strict compliance with the JSON specification, instead of using the
    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).

    If ``indent`` is a non-negative integer, then JSON array elements and
    object members will be pretty-printed with that indent level.  An indent
    level of 0 will only insert newlines.  ``None`` is the most compact
    representation.

    If ``separators`` is an ``(item_separator, dict_separator)`` tuple
    then it will be used instead of the default ``(', ', ': ')`` separators.
    ``(',', ':')`` is the most compact JSON representation.

    ``encoding`` is the character encoding for str instances, default is UTF-8.

    ``default(obj)`` is a function that should return a serializable version
    of obj or raise TypeError.  The default simply raises TypeError.

    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
    ``.default()`` method to serialize additional types), specify it with
    the ``cls`` kwarg.

    """
    # cached encoder
    if (skipkeys is False and ensure_ascii is True and
        check_circular is True and allow_nan is True and
        cls is None and indent is None and separators is None and
        encoding == 'utf-8' and default is None and not kw):
        iterable = _default_encoder.iterencode(obj)
    else:
        if cls is None:
            cls = JSONEncoder
        iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
            check_circular=check_circular, allow_nan=allow_nan, indent=indent,
            separators=separators, encoding=encoding,
            default=default, **kw).iterencode(obj)
    # could accelerate with writelines in some versions of Python, at
    # a debuggability cost
    for chunk in iterable:
        fp.write(chunk)


def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, **kw):
    """Serialize ``obj`` to a JSON formatted ``str``.

    If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
    will be skipped instead of raising a ``TypeError``.

    If ``ensure_ascii`` is ``False``, then the return value will be a
    ``unicode`` instance subject to normal Python ``str`` to ``unicode``
    coercion rules instead of being escaped to an ASCII ``str``.

    If ``check_circular`` is ``False``, then the circular reference check
    for container types will be skipped and a circular reference will
    result in an ``OverflowError`` (or worse).

    If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
    strict compliance with the JSON specification, instead of using the
    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).

    If ``indent`` is a non-negative integer, then JSON array elements and
    object members will be pretty-printed with that indent level.  An indent
    level of 0 will only insert newlines.  ``None`` is the most compact
    representation.

    If ``separators`` is an ``(item_separator, dict_separator)`` tuple
    then it will be used instead of the default ``(', ', ': ')`` separators.
    ``(',', ':')`` is the most compact JSON representation.

    ``encoding`` is the character encoding for str instances, default is UTF-8.

    ``default(obj)`` is a function that should return a serializable version
    of obj or raise TypeError.  The default simply raises TypeError.

    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
    ``.default()`` method to serialize additional types), specify it with
    the ``cls`` kwarg.

    """
    # cached encoder
    if (skipkeys is False and ensure_ascii is True and
        check_circular is True and allow_nan is True and
        cls is None and indent is None and separators is None and
        encoding == 'utf-8' and default is None and not kw):
        return _default_encoder.encode(obj)
    if cls is None:
        cls = JSONEncoder
    return cls(
        skipkeys=skipkeys, ensure_ascii=ensure_ascii,
        check_circular=check_circular, allow_nan=allow_nan, indent=indent,
        separators=separators, encoding=encoding, default=default,
        **kw).encode(obj)


_default_decoder = JSONDecoder(encoding=None, object_hook=None)


def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, **kw):
    """Deserialize ``fp`` (a ``.read()``-supporting file-like object
    containing a JSON document) to a Python object.

    If the contents of ``fp`` is encoded with an ASCII based encoding other
    than utf-8 (e.g. latin-1), then an appropriate ``encoding`` name must
    be specified.  Encodings that are not ASCII based (such as UCS-2) are
    not allowed, and should be wrapped with
    ``codecs.getreader(fp)(encoding)``, or simply decoded to a ``unicode``
    object and passed to ``loads()``.

    ``object_hook`` is an optional function that will be called with the
    result of any object literal decode (a ``dict``).  The return value of
    ``object_hook`` will be used instead of the ``dict``.  This feature
    can be used to implement custom decoders (e.g. JSON-RPC class hinting).

    To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
    kwarg.

    """
    return loads(fp.read(),
        encoding=encoding, cls=cls, object_hook=object_hook,
        parse_float=parse_float, parse_int=parse_int,
        parse_constant=parse_constant, **kw)


def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, **kw):
    """Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
    document) to a Python object.

    If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding
    other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name
    must be specified.  Encodings that are not ASCII based (such as UCS-2)
    are not allowed and should be decoded to ``unicode`` first.

    ``object_hook`` is an optional function that will be called with the
    result of any object literal decode (a ``dict``).  The return value of
    ``object_hook`` will be used instead of the ``dict``.  This feature
    can be used to implement custom decoders (e.g. JSON-RPC class hinting).

    ``parse_float``, if specified, will be called with the string
    of every JSON float to be decoded.  By default this is equivalent to
    float(num_str).  This can be used to use another datatype or parser
    for JSON floats (e.g. decimal.Decimal).

    ``parse_int``, if specified, will be called with the string
    of every JSON int to be decoded.  By default this is equivalent to
    int(num_str).  This can be used to use another datatype or parser
    for JSON integers (e.g. float).

    ``parse_constant``, if specified, will be called with one of the
    following strings: -Infinity, Infinity, NaN, null, true, false.
    This can be used to raise an exception if invalid JSON numbers
    are encountered.

    To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
    kwarg.

    """
    if (cls is None and encoding is None and object_hook is None and
            parse_int is None and parse_float is None and
            parse_constant is None and not kw):
        return _default_decoder.decode(s)
    if cls is None:
        cls = JSONDecoder
    if object_hook is not None:
        kw['object_hook'] = object_hook
    if parse_float is not None:
        kw['parse_float'] = parse_float
    if parse_int is not None:
        kw['parse_int'] = parse_int
    if parse_constant is not None:
        kw['parse_constant'] = parse_constant
    return cls(encoding=encoding, **kw).decode(s)
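Editor's note: the miner's JSON-RPC layer builds on this module. A rough
sketch of the round trip (the "getwork" method name follows the usual Bitcoin
pool convention; the field values below are invented for illustration):

    import json

    # Request body for a JSON-RPC 1.0 call, ready to POST to a pool:
    request = json.dumps({"method": "getwork", "params": [], "id": 1})

    # A reply decodes straight back into dicts and lists:
    reply = json.loads('{"id": 1, "error": null, "result": {"data": "00"}}')
    print reply["result"]["data"]    # -> 00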
BIN
icarus-miner/data/usr/lib/python2.6/json/__init__.pyc
Normal file
Binary file not shown.
339
icarus-miner/data/usr/lib/python2.6/json/decoder.py
Normal file
@@ -0,0 +1,339 @@
"""Implementation of JSONDecoder
"""

import re
import sys

from json.scanner import Scanner, pattern
try:
    from _json import scanstring as c_scanstring
except ImportError:
    c_scanstring = None

__all__ = ['JSONDecoder']

FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL

NaN, PosInf, NegInf = float('nan'), float('inf'), float('-inf')


def linecol(doc, pos):
    lineno = doc.count('\n', 0, pos) + 1
    if lineno == 1:
        colno = pos
    else:
        colno = pos - doc.rindex('\n', 0, pos)
    return lineno, colno


def errmsg(msg, doc, pos, end=None):
    lineno, colno = linecol(doc, pos)
    if end is None:
        fmt = '{0}: line {1} column {2} (char {3})'
        return fmt.format(msg, lineno, colno, pos)
    endlineno, endcolno = linecol(doc, end)
    fmt = '{0}: line {1} column {2} - line {3} column {4} (char {5} - {6})'
    return fmt.format(msg, lineno, colno, endlineno, endcolno, pos, end)


_CONSTANTS = {
    '-Infinity': NegInf,
    'Infinity': PosInf,
    'NaN': NaN,
    'true': True,
    'false': False,
    'null': None,
}


def JSONConstant(match, context, c=_CONSTANTS):
    s = match.group(0)
    fn = getattr(context, 'parse_constant', None)
    if fn is None:
        rval = c[s]
    else:
        rval = fn(s)
    return rval, None
pattern('(-?Infinity|NaN|true|false|null)')(JSONConstant)


def JSONNumber(match, context):
    match = JSONNumber.regex.match(match.string, *match.span())
    integer, frac, exp = match.groups()
    if frac or exp:
        fn = getattr(context, 'parse_float', None) or float
        res = fn(integer + (frac or '') + (exp or ''))
    else:
        fn = getattr(context, 'parse_int', None) or int
        res = fn(integer)
    return res, None
pattern(r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?')(JSONNumber)


STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
BACKSLASH = {
    '"': u'"', '\\': u'\\', '/': u'/',
    'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
}

DEFAULT_ENCODING = "utf-8"


def py_scanstring(s, end, encoding=None, strict=True, _b=BACKSLASH, _m=STRINGCHUNK.match):
    if encoding is None:
        encoding = DEFAULT_ENCODING
    chunks = []
    _append = chunks.append
    begin = end - 1
    while 1:
        chunk = _m(s, end)
        if chunk is None:
            raise ValueError(
                errmsg("Unterminated string starting at", s, begin))
        end = chunk.end()
        content, terminator = chunk.groups()
        if content:
            if not isinstance(content, unicode):
                content = unicode(content, encoding)
            _append(content)
        if terminator == '"':
            break
        elif terminator != '\\':
            if strict:
                msg = "Invalid control character {0!r} at".format(terminator)
                raise ValueError(errmsg(msg, s, end))
            else:
                _append(terminator)
                continue
        try:
            esc = s[end]
        except IndexError:
            raise ValueError(
                errmsg("Unterminated string starting at", s, begin))
        if esc != 'u':
            try:
                m = _b[esc]
            except KeyError:
                msg = "Invalid \\escape: {0!r}".format(esc)
                raise ValueError(errmsg(msg, s, end))
            end += 1
        else:
            esc = s[end + 1:end + 5]
            next_end = end + 5
            msg = "Invalid \\uXXXX escape"
            try:
                if len(esc) != 4:
                    raise ValueError
                uni = int(esc, 16)
                if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
                    msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
                    if not s[end + 5:end + 7] == '\\u':
                        raise ValueError
                    esc2 = s[end + 7:end + 11]
                    if len(esc2) != 4:
                        raise ValueError
                    uni2 = int(esc2, 16)
                    uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
                    next_end += 6
                m = unichr(uni)
            except ValueError:
                raise ValueError(errmsg(msg, s, end))
            end = next_end
        _append(m)
    return u''.join(chunks), end


# Use speedup
if c_scanstring is not None:
    scanstring = c_scanstring
else:
    scanstring = py_scanstring

def JSONString(match, context):
    encoding = getattr(context, 'encoding', None)
    strict = getattr(context, 'strict', True)
    return scanstring(match.string, match.end(), encoding, strict)
pattern(r'"')(JSONString)


WHITESPACE = re.compile(r'\s*', FLAGS)


def JSONObject(match, context, _w=WHITESPACE.match):
    pairs = {}
    s = match.string
    end = _w(s, match.end()).end()
    nextchar = s[end:end + 1]
    # Trivial empty object
    if nextchar == '}':
        return pairs, end + 1
    if nextchar != '"':
        raise ValueError(errmsg("Expecting property name", s, end))
    end += 1
    encoding = getattr(context, 'encoding', None)
    strict = getattr(context, 'strict', True)
    iterscan = JSONScanner.iterscan
    while True:
        key, end = scanstring(s, end, encoding, strict)
        end = _w(s, end).end()
        if s[end:end + 1] != ':':
            raise ValueError(errmsg("Expecting : delimiter", s, end))
        end = _w(s, end + 1).end()
        try:
            value, end = iterscan(s, idx=end, context=context).next()
        except StopIteration:
            raise ValueError(errmsg("Expecting object", s, end))
        pairs[key] = value
        end = _w(s, end).end()
        nextchar = s[end:end + 1]
        end += 1
        if nextchar == '}':
            break
        if nextchar != ',':
            raise ValueError(errmsg("Expecting , delimiter", s, end - 1))
        end = _w(s, end).end()
        nextchar = s[end:end + 1]
        end += 1
        if nextchar != '"':
            raise ValueError(errmsg("Expecting property name", s, end - 1))
    object_hook = getattr(context, 'object_hook', None)
    if object_hook is not None:
        pairs = object_hook(pairs)
    return pairs, end
pattern(r'{')(JSONObject)


def JSONArray(match, context, _w=WHITESPACE.match):
    values = []
    s = match.string
    end = _w(s, match.end()).end()
    # Look-ahead for trivial empty array
    nextchar = s[end:end + 1]
    if nextchar == ']':
        return values, end + 1
    iterscan = JSONScanner.iterscan
    while True:
        try:
            value, end = iterscan(s, idx=end, context=context).next()
        except StopIteration:
            raise ValueError(errmsg("Expecting object", s, end))
        values.append(value)
        end = _w(s, end).end()
        nextchar = s[end:end + 1]
        end += 1
        if nextchar == ']':
            break
        if nextchar != ',':
            raise ValueError(errmsg("Expecting , delimiter", s, end))
        end = _w(s, end).end()
    return values, end
pattern(r'\[')(JSONArray)


ANYTHING = [
    JSONObject,
    JSONArray,
    JSONString,
    JSONConstant,
    JSONNumber,
]

JSONScanner = Scanner(ANYTHING)


class JSONDecoder(object):
    """Simple JSON <http://json.org> decoder

    Performs the following translations in decoding by default:

    +---------------+-------------------+
    | JSON          | Python            |
    +===============+===================+
    | object        | dict              |
    +---------------+-------------------+
    | array         | list              |
    +---------------+-------------------+
    | string        | unicode           |
    +---------------+-------------------+
    | number (int)  | int, long         |
    +---------------+-------------------+
    | number (real) | float             |
    +---------------+-------------------+
    | true          | True              |
    +---------------+-------------------+
    | false         | False             |
    +---------------+-------------------+
    | null          | None              |
    +---------------+-------------------+

    It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
    their corresponding ``float`` values, which is outside the JSON spec.
    """

    _scanner = Scanner(ANYTHING)
    __all__ = ['__init__', 'decode', 'raw_decode']

    def __init__(self, encoding=None, object_hook=None, parse_float=None,
            parse_int=None, parse_constant=None, strict=True):
        """``encoding`` determines the encoding used to interpret any ``str``
        objects decoded by this instance (utf-8 by default).  It has no
        effect when decoding ``unicode`` objects.

        Note that currently only encodings that are a superset of ASCII work;
        strings of other encodings should be passed in as ``unicode``.

        ``object_hook``, if specified, will be called with the result of
        every JSON object decoded and its return value will be used in
        place of the given ``dict``.  This can be used to provide custom
        deserializations (e.g. to support JSON-RPC class hinting).

        ``parse_float``, if specified, will be called with the string
        of every JSON float to be decoded.  By default this is equivalent to
        float(num_str).  This can be used to use another datatype or parser
        for JSON floats (e.g. decimal.Decimal).

        ``parse_int``, if specified, will be called with the string
        of every JSON int to be decoded.  By default this is equivalent to
        int(num_str).  This can be used to use another datatype or parser
        for JSON integers (e.g. float).

        ``parse_constant``, if specified, will be called with one of the
        following strings: -Infinity, Infinity, NaN, null, true, false.
        This can be used to raise an exception if invalid JSON numbers
        are encountered.

        """
        self.encoding = encoding
        self.object_hook = object_hook
        self.parse_float = parse_float
        self.parse_int = parse_int
        self.parse_constant = parse_constant
        self.strict = strict

    def decode(self, s, _w=WHITESPACE.match):
        """Return the Python representation of ``s`` (a ``str`` or ``unicode``
        instance containing a JSON document)

        """
        obj, end = self.raw_decode(s, idx=_w(s, 0).end())
        end = _w(s, end).end()
        if end != len(s):
            raise ValueError(errmsg("Extra data", s, end, len(s)))
        return obj

    def raw_decode(self, s, **kw):
        """Decode a JSON document from ``s`` (a ``str`` or ``unicode`` beginning
        with a JSON document) and return a 2-tuple of the Python
        representation and the index in ``s`` where the document ended.

        This can be used to decode a JSON document from a string that may
        have extraneous data at the end.

        """
        kw.setdefault('context', self)
        try:
            obj, end = self._scanner.iterscan(s, **kw).next()
        except StopIteration:
            raise ValueError("No JSON object could be decoded")
        return obj, end
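Editor's note: the raw_decode contract above is easiest to see in a quick
doctest-style check (the key and the trailing text are invented for
illustration):

    >>> import json
    >>> json.JSONDecoder().raw_decode('{"midstate": "00"} trailing bytes')
    ({u'midstate': u'00'}, 18)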
BIN
icarus-miner/data/usr/lib/python2.6/json/decoder.pyc
Normal file
Binary file not shown.
384
icarus-miner/data/usr/lib/python2.6/json/encoder.py
Normal file
@@ -0,0 +1,384 @@
"""Implementation of JSONEncoder
"""

import re
import math

try:
    from _json import encode_basestring_ascii as c_encode_basestring_ascii
except ImportError:
    c_encode_basestring_ascii = None

__all__ = ['JSONEncoder']

ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
HAS_UTF8 = re.compile(r'[\x80-\xff]')
ESCAPE_DCT = {
    '\\': '\\\\',
    '"': '\\"',
    '\b': '\\b',
    '\f': '\\f',
    '\n': '\\n',
    '\r': '\\r',
    '\t': '\\t',
}
for i in range(0x20):
    ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))

FLOAT_REPR = repr

def floatstr(o, allow_nan=True):
    # Check for specials.  Note that this type of test is processor- and/or
    # platform-specific, so do tests which don't depend on the internals.

    if math.isnan(o):
        text = 'NaN'
    elif math.isinf(o):
        if math.copysign(1., o) == 1.:
            text = 'Infinity'
        else:
            text = '-Infinity'
    else:
        return FLOAT_REPR(o)

    if not allow_nan:
        msg = "Out of range float values are not JSON compliant: " + repr(o)
        raise ValueError(msg)

    return text


def encode_basestring(s):
    """Return a JSON representation of a Python string

    """
    def replace(match):
        return ESCAPE_DCT[match.group(0)]
    return '"' + ESCAPE.sub(replace, s) + '"'


def py_encode_basestring_ascii(s):
    if isinstance(s, str) and HAS_UTF8.search(s) is not None:
        s = s.decode('utf-8')
    def replace(match):
        s = match.group(0)
        try:
            return ESCAPE_DCT[s]
        except KeyError:
            n = ord(s)
            if n < 0x10000:
                return '\\u{0:04x}'.format(n)
            else:
                # surrogate pair
                n -= 0x10000
                s1 = 0xd800 | ((n >> 10) & 0x3ff)
                s2 = 0xdc00 | (n & 0x3ff)
                return '\\u{0:04x}\\u{1:04x}'.format(s1, s2)
    return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'


if c_encode_basestring_ascii is not None:
    encode_basestring_ascii = c_encode_basestring_ascii
else:
    encode_basestring_ascii = py_encode_basestring_ascii


class JSONEncoder(object):
    """Extensible JSON <http://json.org> encoder for Python data structures.

    Supports the following objects and types by default:

    +-------------------+---------------+
    | Python            | JSON          |
    +===================+===============+
    | dict              | object        |
    +-------------------+---------------+
    | list, tuple       | array         |
    +-------------------+---------------+
    | str, unicode      | string        |
    +-------------------+---------------+
    | int, long, float  | number        |
    +-------------------+---------------+
    | True              | true          |
    +-------------------+---------------+
    | False             | false         |
    +-------------------+---------------+
    | None              | null          |
    +-------------------+---------------+

    To extend this to recognize other objects, subclass and implement a
    ``.default()`` method that returns a serializable object for ``o`` if
    possible, and otherwise calls the superclass implementation (to raise
    ``TypeError``).

    """
    __all__ = ['__init__', 'default', 'encode', 'iterencode']
    item_separator = ', '
    key_separator = ': '
    def __init__(self, skipkeys=False, ensure_ascii=True,
            check_circular=True, allow_nan=True, sort_keys=False,
            indent=None, separators=None, encoding='utf-8', default=None):
        """Constructor for JSONEncoder, with sensible defaults.

        If skipkeys is False, then it is a TypeError to attempt
        encoding of keys that are not str, int, long, float or None.  If
        skipkeys is True, such items are simply skipped.

        If ensure_ascii is True, the output is guaranteed to be str
        objects with all incoming unicode characters escaped.  If
        ensure_ascii is false, the output will be a unicode object.

        If check_circular is True, then lists, dicts, and custom encoded
        objects will be checked for circular references during encoding to
        prevent an infinite recursion (which would cause an OverflowError).
        Otherwise, no such check takes place.

        If allow_nan is True, then NaN, Infinity, and -Infinity will be
        encoded as such.  This behavior is not JSON specification compliant,
        but is consistent with most JavaScript based encoders and decoders.
        Otherwise, it will be a ValueError to encode such floats.

        If sort_keys is True, then the output of dictionaries will be
        sorted by key; this is useful for regression tests to ensure
        that JSON serializations can be compared on a day-to-day basis.

        If indent is a non-negative integer, then JSON array
        elements and object members will be pretty-printed with that
        indent level.  An indent level of 0 will only insert newlines.
        None is the most compact representation.

        If specified, separators should be an (item_separator, key_separator)
        tuple.  The default is (', ', ': ').  To get the most compact JSON
        representation you should specify (',', ':') to eliminate whitespace.

        If specified, default is a function that gets called for objects
        that can't otherwise be serialized.  It should return a JSON encodable
        version of the object or raise a ``TypeError``.

        If encoding is not None, then all input strings will be
        transformed into unicode using that encoding prior to JSON-encoding.
        The default is UTF-8.

        """
        self.skipkeys = skipkeys
        self.ensure_ascii = ensure_ascii
        self.check_circular = check_circular
        self.allow_nan = allow_nan
        self.sort_keys = sort_keys
        self.indent = indent
        self.current_indent_level = 0
        if separators is not None:
            self.item_separator, self.key_separator = separators
        if default is not None:
            self.default = default
        self.encoding = encoding

    def _newline_indent(self):
        return '\n' + (' ' * (self.indent * self.current_indent_level))

    def _iterencode_list(self, lst, markers=None):
        if not lst:
            yield '[]'
            return
        if markers is not None:
            markerid = id(lst)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = lst
        yield '['
        if self.indent is not None:
            self.current_indent_level += 1
            newline_indent = self._newline_indent()
            separator = self.item_separator + newline_indent
            yield newline_indent
        else:
            newline_indent = None
            separator = self.item_separator
        first = True
        for value in lst:
            if first:
                first = False
            else:
                yield separator
            for chunk in self._iterencode(value, markers):
                yield chunk
        if newline_indent is not None:
            self.current_indent_level -= 1
            yield self._newline_indent()
        yield ']'
        if markers is not None:
            del markers[markerid]

    def _iterencode_dict(self, dct, markers=None):
        if not dct:
            yield '{}'
            return
        if markers is not None:
            markerid = id(dct)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = dct
        yield '{'
        key_separator = self.key_separator
        if self.indent is not None:
            self.current_indent_level += 1
            newline_indent = self._newline_indent()
            item_separator = self.item_separator + newline_indent
            yield newline_indent
        else:
            newline_indent = None
            item_separator = self.item_separator
        first = True
        if self.ensure_ascii:
            encoder = encode_basestring_ascii
        else:
            encoder = encode_basestring
        allow_nan = self.allow_nan
        if self.sort_keys:
            keys = dct.keys()
            keys.sort()
            items = [(k, dct[k]) for k in keys]
        else:
            items = dct.iteritems()
        _encoding = self.encoding
        _do_decode = (_encoding is not None
            and not (_encoding == 'utf-8'))
        for key, value in items:
            if isinstance(key, str):
                if _do_decode:
                    key = key.decode(_encoding)
            elif isinstance(key, basestring):
                pass
            # JavaScript is weakly typed for these, so it makes sense to
            # also allow them.  Many encoders seem to do something like this.
            elif isinstance(key, float):
                key = floatstr(key, allow_nan)
            elif isinstance(key, (int, long)):
                key = str(key)
            elif key is True:
                key = 'true'
            elif key is False:
                key = 'false'
            elif key is None:
                key = 'null'
            elif self.skipkeys:
                continue
            else:
                raise TypeError("key {0!r} is not a string".format(key))
            if first:
                first = False
            else:
                yield item_separator
            yield encoder(key)
            yield key_separator
            for chunk in self._iterencode(value, markers):
                yield chunk
        if newline_indent is not None:
            self.current_indent_level -= 1
            yield self._newline_indent()
        yield '}'
        if markers is not None:
            del markers[markerid]

    def _iterencode(self, o, markers=None):
        if isinstance(o, basestring):
            if self.ensure_ascii:
                encoder = encode_basestring_ascii
            else:
                encoder = encode_basestring
            _encoding = self.encoding
            if (_encoding is not None and isinstance(o, str)
                    and not (_encoding == 'utf-8')):
                o = o.decode(_encoding)
            yield encoder(o)
        elif o is None:
            yield 'null'
        elif o is True:
            yield 'true'
        elif o is False:
            yield 'false'
        elif isinstance(o, (int, long)):
            yield str(o)
        elif isinstance(o, float):
            yield floatstr(o, self.allow_nan)
        elif isinstance(o, (list, tuple)):
            for chunk in self._iterencode_list(o, markers):
                yield chunk
        elif isinstance(o, dict):
            for chunk in self._iterencode_dict(o, markers):
                yield chunk
        else:
            if markers is not None:
                markerid = id(o)
                if markerid in markers:
                    raise ValueError("Circular reference detected")
                markers[markerid] = o
            for chunk in self._iterencode_default(o, markers):
                yield chunk
            if markers is not None:
                del markers[markerid]

    def _iterencode_default(self, o, markers=None):
        newobj = self.default(o)
        return self._iterencode(newobj, markers)

    def default(self, o):
        """Implement this method in a subclass such that it returns a serializable
        object for ``o``, or calls the base implementation (to raise a
        ``TypeError``).

        For example, to support arbitrary iterators, you could implement
        default like this::

            def default(self, o):
                try:
                    iterable = iter(o)
                except TypeError:
                    pass
                else:
                    return list(iterable)
                return JSONEncoder.default(self, o)

        """
        raise TypeError(repr(o) + " is not JSON serializable")

    def encode(self, o):
        """Return a JSON string representation of a Python data structure.

        >>> JSONEncoder().encode({"foo": ["bar", "baz"]})
        '{"foo": ["bar", "baz"]}'

        """
        # This is for extremely simple cases and benchmarks.
        if isinstance(o, basestring):
            if isinstance(o, str):
                _encoding = self.encoding
                if (_encoding is not None
                        and not (_encoding == 'utf-8')):
                    o = o.decode(_encoding)
            if self.ensure_ascii:
                return encode_basestring_ascii(o)
            else:
                return encode_basestring(o)
        # This doesn't pass the iterator directly to ''.join() because the
        # exceptions aren't as detailed.  The list call should be roughly
        # equivalent to the PySequence_Fast that ''.join() would do.
        chunks = list(self.iterencode(o))
        return ''.join(chunks)

    def iterencode(self, o):
        """Encode the given object and yield each string representation as
        available.

        For example::

            for chunk in JSONEncoder().iterencode(bigobject):
                mysocket.write(chunk)

        """
        if self.check_circular:
            markers = {}
        else:
            markers = None
        return self._iterencode(o, markers)
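Editor's note: a quick doctest-style check of the allow_nan branch in
floatstr() above:

    >>> import json
    >>> json.dumps(float('inf'))
    'Infinity'
    >>> json.dumps(float('inf'), allow_nan=False)
    Traceback (most recent call last):
        ...
    ValueError: Out of range float values are not JSON compliant: inf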
BIN
icarus-miner/data/usr/lib/python2.6/json/encoder.pyc
Normal file
Binary file not shown.
69
icarus-miner/data/usr/lib/python2.6/json/scanner.py
Normal file
69
icarus-miner/data/usr/lib/python2.6/json/scanner.py
Normal file
@ -0,0 +1,69 @@
"""Iterator based sre token scanner

"""

import re
import sre_parse
import sre_compile
import sre_constants

from re import VERBOSE, MULTILINE, DOTALL
from sre_constants import BRANCH, SUBPATTERN

__all__ = ['Scanner', 'pattern']

FLAGS = (VERBOSE | MULTILINE | DOTALL)

class Scanner(object):
    def __init__(self, lexicon, flags=FLAGS):
        self.actions = [None]
        # Combine phrases into a compound pattern
        s = sre_parse.Pattern()
        s.flags = flags
        p = []
        for idx, token in enumerate(lexicon):
            phrase = token.pattern
            try:
                subpattern = sre_parse.SubPattern(s,
                    [(SUBPATTERN, (idx + 1, sre_parse.parse(phrase, flags)))])
            except sre_constants.error:
                raise
            p.append(subpattern)
            self.actions.append(token)

        s.groups = len(p) + 1 # NOTE(guido): Added to make SRE validation work
        p = sre_parse.SubPattern(s, [(BRANCH, (None, p))])
        self.scanner = sre_compile.compile(p)

    def iterscan(self, string, idx=0, context=None):
        """Yield match, end_idx for each match

        """
        match = self.scanner.scanner(string, idx).match
        actions = self.actions
        lastend = idx
        end = len(string)
        while True:
            m = match()
            if m is None:
                break
            matchbegin, matchend = m.span()
            if lastend == matchend:
                break
            action = actions[m.lastindex]
            if action is not None:
                rval, next_pos = action(m, context)
                if next_pos is not None and next_pos != matchend:
                    # "fast forward" the scanner
                    matchend = next_pos
                    match = self.scanner.scanner(string, matchend).match
                yield rval, matchend
            lastend = matchend


def pattern(pattern, flags=FLAGS):
    def decorator(fn):
        fn.pattern = pattern
        fn.regex = re.compile(pattern, flags)
        return fn
    return decorator
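For reference, a minimal sketch (not part of this commit; the lexicon and action below are hypothetical) of how a decoder drives this scanner: @pattern attaches .pattern and .regex to an action function, Scanner branches over all actions, and iterscan() yields (value, end_index) pairs:

from json.scanner import Scanner, pattern

# hypothetical action: parse runs of digits, tolerating leading whitespace
@pattern(r'\s*-?\d+')
def number(match, context):
    # actions return (value, next_position); next_position=None keeps the match end
    return int(match.group(0)), None

scanner = Scanner([number])
print list(scanner.iterscan('12 -34 56'))   # [(12, 2), (-34, 6), (56, 9)]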
BIN
icarus-miner/data/usr/lib/python2.6/json/scanner.pyc
Normal file
Binary file not shown.
37
icarus-miner/data/usr/lib/python2.6/json/tool.py
Normal file
@@ -0,0 +1,37 @@
r"""Command-line tool to validate and pretty-print JSON

Usage::

    $ echo '{"json":"obj"}' | python -mjson.tool
    {
        "json": "obj"
    }
    $ echo '{ 1.2:3.4}' | python -mjson.tool
    Expecting property name: line 1 column 2 (char 2)

"""
import sys
import json

def main():
    if len(sys.argv) == 1:
        infile = sys.stdin
        outfile = sys.stdout
    elif len(sys.argv) == 2:
        infile = open(sys.argv[1], 'rb')
        outfile = sys.stdout
    elif len(sys.argv) == 3:
        infile = open(sys.argv[1], 'rb')
        outfile = open(sys.argv[2], 'wb')
    else:
        raise SystemExit("{0} [infile [outfile]]".format(sys.argv[0]))
    try:
        obj = json.load(infile)
    except ValueError, e:
        raise SystemExit(e)
    json.dump(obj, outfile, sort_keys=True, indent=4)
    outfile.write('\n')


if __name__ == '__main__':
    main()
26
icarus-miner/data/usr/lib/python2.6/jsonrpc/__init__.py
Normal file
@@ -0,0 +1,26 @@

"""
Copyright (c) 2007 Jan-Klaas Kollhof

This file is part of jsonrpc.

jsonrpc is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.

This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public License
along with this software; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""

from jsonrpc.json import loads, dumps, JSONEncodeException, JSONDecodeException
from jsonrpc.proxy import ServiceProxy, JSONRPCException
from jsonrpc.serviceHandler import ServiceMethod, ServiceHandler, ServiceMethodNotFound, ServiceException
from jsonrpc.cgiwrapper import handleCGI
from jsonrpc.modpywrapper import handler
BIN
icarus-miner/data/usr/lib/python2.6/jsonrpc/__init__.pyc
Normal file
Binary file not shown.
@@ -0,0 +1,20 @@

"""
Copyright (c) 2007 Jan-Klaas Kollhof

This file is part of jsonrpc.

jsonrpc is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.

This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public License
along with this software; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
@@ -0,0 +1,55 @@

"""
Copyright (c) 2007 Jan-Klaas Kollhof

This file is part of jsonrpc.

jsonrpc is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.

This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public License
along with this software; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import unittest
import jsonrpc
from types import *

class Service(object):
    @jsonrpc.ServiceMethod
    def echo(self, arg):
        return arg


class TestCGIWrapper(unittest.TestCase):

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_runCGIHandler(self):
        from StringIO import StringIO

        json=u'{"method":"echo","params":["foobar"], "id":""}'
        fin=StringIO(json)
        fout=StringIO()

        env = {"CONTENT_LENGTH":len(json)}

        jsonrpc.handleCGI(service=Service(), fin=fin, fout=fout, env=env)

        data = StringIO(fout.getvalue())
        data.readline()
        data.readline()
        data = data.read()
        self.assertEquals(jsonrpc.loads(data), {"result":"foobar", "error":None, "id":""})
184
icarus-miner/data/usr/lib/python2.6/jsonrpc/_tests/test_json.py
Normal file
@@ -0,0 +1,184 @@

"""
Copyright (c) 2007 Jan-Klaas Kollhof

This file is part of jsonrpc.

jsonrpc is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.

This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public License
along with this software; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""

import unittest
import jsonrpc
from types import *



class TestDumps(unittest.TestCase):
    def setUp(self):
        pass

    def tearDown(self):
        pass

    def assertJSON(self, json, expectedJSON):
        self.assert_(type(json) is UnicodeType)
        self.assertEqual(json, expectedJSON)

    def test_Number(self):
        json = jsonrpc.dumps(1)
        self.assertJSON(json, u'1')

        json = jsonrpc.dumps(0xffffffffffffffffffffffff)
        self.assertJSON(json, u'79228162514264337593543950335')

    def test_None(self):
        json = jsonrpc.dumps(None)
        self.assertJSON(json, u'null')

    def test_Boolean(self):
        json = jsonrpc.dumps(False)
        self.assertJSON(json, u'false')
        json = jsonrpc.dumps(True)
        self.assertJSON(json, u'true')

    def test_Float(self):
        json = jsonrpc.dumps(1.2345)
        self.assertJSON(json, u'1.2345')

        json =jsonrpc.dumps(1.2345e67)
        self.assertJSON(json, u'1.2345e+67')

        json =jsonrpc.dumps(1.2345e-67)
        self.assertJSON(json, u'1.2345e-67')

    def test_String(self):
        json = jsonrpc.dumps('foobar')
        self.assertJSON(json, u'"foobar"')

        json = jsonrpc.dumps('foobar')
        self.assertJSON(json, u'"foobar"')

    def test_StringEscapedChars(self):
        json = jsonrpc.dumps('\n \f \t \b \r \\ " /')
        self.assertJSON(json, u'"\\n \\f \\t \\b \\r \\\\ \\" \\/"')

    def test_StringEscapedUnicodeChars(self):
        json = jsonrpc.dumps(u'\0 \x19 \x20\u0130')
        self.assertJSON(json, u'"\\u0000 \\u0019 \u0130"')

    def test_Array(self):
        json = jsonrpc.dumps([1, 2.3e45, 'foobar'])
        self.assertJSON(json, u'[1,2.3e+45,"foobar"]')

    def test_Dictionary(self):
        json = jsonrpc.dumps({'foobar':'spam', 'a':[1,2,3]})
        self.assertJSON(json, u'{"a":[1,2,3],"foobar":"spam"}')

    def test_FailOther(self):
        self.failUnlessRaises(jsonrpc.JSONEncodeException, lambda:jsonrpc.dumps(self))




class TestLoads(unittest.TestCase):
    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_String(self):

        json = jsonrpc.dumps("foobar")
        obj = jsonrpc.loads(json)
        self.assertEquals(obj, u"foobar")

    def test_StringEscapedChars(self):
        json = '"\\n \\t \\r \\b \\f \\\\ \\/ /"'
        obj = jsonrpc.loads(json)
        self.assertEquals(obj, u'\n \t \r \b \f \\ / /')

    def test_StringEscapedUnicodeChars(self):
        json = jsonrpc.dumps(u'\u0000 \u0019')
        obj = jsonrpc.loads(json)
        self.assertEquals(obj, u'\0 \x19')

    def test_Array(self):
        json = jsonrpc.dumps(['1', ['2','3']])
        obj = jsonrpc.loads(json)
        self.assertEquals(obj, ['1', ['2','3']])

    def test_Dictionary(self):
        json = jsonrpc.dumps({'foobar':'spam', 'nested':{'a':'b'}})
        obj = jsonrpc.loads(json)
        self.assertEquals(obj, {'foobar':'spam', 'nested':{'a':'b'}})

    def test_Int(self):
        json = jsonrpc.dumps(1234)
        obj = jsonrpc.loads(json)
        self.assertEquals(obj, 1234)

    def test_NegativeInt(self):
        json = jsonrpc.dumps(-1234)
        obj = jsonrpc.loads(json)
        self.assertEquals(obj, -1234)

    def test_NumberAtEndOfArray(self):
        json = jsonrpc.dumps([-1234])
        obj = jsonrpc.loads(json)
        self.assertEquals(obj, [-1234])

    def test_StrAtEndOfArray(self):
        json = jsonrpc.dumps(['foobar'])
        obj = jsonrpc.loads(json)
        self.assertEquals(obj, ['foobar'])

    def test_Float(self):
        json = jsonrpc.dumps(1234.567)
        obj = jsonrpc.loads(json)
        self.assertEquals(obj, 1234.567)

    def test_Exponential(self):
        json = jsonrpc.dumps(1234.567e89)
        obj = jsonrpc.loads(json)
        self.assertEquals(obj, 1234.567e89)

    def test_True(self):
        json = jsonrpc.dumps(True)
        obj = jsonrpc.loads(json)
        self.assertEquals(obj, True)

    def test_False(self):
        json = jsonrpc.dumps(False)
        obj = jsonrpc.loads(json)
        self.assertEquals(obj, False)

    def test_None(self):
        json = jsonrpc.dumps(None)
        obj = jsonrpc.loads(json)
        self.assertEquals(obj, None)

    def test_NestedDictAllTypes(self):
        json = jsonrpc.dumps({'s':'foobar', 'int':1234, 'float':1234.567, 'exp':1234.56e78,
                              'negInt':-1234, 'None':None,'True':True, 'False':False,
                              'list':[1,2,4,{}], 'dict':{'a':'b'}})
        obj = jsonrpc.loads(json)
        self.assertEquals(obj, {'s':'foobar', 'int':1234, 'float':1234.567, 'exp':1234.56e78,
                              'negInt':-1234, 'None':None,'True':True, 'False':False,
                              'list':[1,2,4,{}], 'dict':{'a':'b'}})
@@ -0,0 +1,98 @@

"""
Copyright (c) 2007 Jan-Klaas Kollhof

This file is part of jsonrpc.

jsonrpc is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.

This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public License
along with this software; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import unittest
import jsonrpc
from types import *

class Service(object):
    @jsonrpc.ServiceMethod
    def echo(self, arg):
        return arg


class ApacheRequestMockup(object):

    def __init__(self, filename, fin, fout):
        self.fin=fin
        self.fout = fout
        self.filename = filename

    def write(self,data):
        self.fout.write(data)

    def flush(self):
        pass

    def read(self):
        return self.fin.read()

class ModPyMockup(object):
    def __init__(self):
        self.apache=ApacheModuleMockup()

class ApacheModuleMockup(object):
    def __getattr__(self, name):
        return name

    def import_module(self, moduleName, log=1):
        return Service()



class TestModPyWrapper(unittest.TestCase):

    def setUp(self):
        import sys
        sys.modules['mod_python'] =ModPyMockup()

    def tearDown(self):
        pass

    def test_runHandler(self):
        from StringIO import StringIO

        json=u'{"method":"echo","params":["foobar"], "id":""}'
        fin=StringIO(json)
        fout=StringIO()
        req = ApacheRequestMockup(__file__ , fin, fout)

        jsonrpc.handler(req)

        data = fout.getvalue()

        self.assertEquals(jsonrpc.loads(data), {"result":"foobar", "error":None, "id":""})

    def test_ServiceImplementationNotFound(self):
        from StringIO import StringIO

        json=u'{"method":"echo","params":["foobar"], "id":""}'
        fin=StringIO(json)
        fout=StringIO()
        req = ApacheRequestMockup("foobar" , fin, fout)

        rslt = jsonrpc.handler(req)
        self.assertEquals(rslt, "OK")
        data = fout.getvalue()

        self.assertEquals(jsonrpc.loads(data), {u'id': '', u'result': None, u'error': {u'message': '', u'name': u'ServiceImplementaionNotFound'}} )
@@ -0,0 +1,61 @@

"""
Copyright (c) 2007 Jan-Klaas Kollhof

This file is part of jsonrpc.

jsonrpc is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.

This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public License
along with this software; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""

import unittest
import jsonrpc

import urllib

from StringIO import StringIO

class TestProxy(unittest.TestCase):

    def urlopen(self, url, data):
        self.postdata = data
        return StringIO(self.respdata)

    def setUp(self):
        self.postdata=""
        self.urllib_openurl = urllib.urlopen
        urllib.urlopen = self.urlopen

    def tearDown(self):
        urllib.urlopen = self.urllib_openurl

    def test_ProvidesProxyMethod(self):
        s = jsonrpc.ServiceProxy("http://localhost/")
        self.assert_(callable(s.echo))

    def test_MethodCallCallsService(self):

        s = jsonrpc.ServiceProxy("http://localhost/")

        self.respdata='{"result":"foobar","error":null,"id":""}'
        echo = s.echo("foobar")
        self.assertEquals(self.postdata, jsonrpc.dumps({"method":"echo", 'params':['foobar'], 'id':'jsonrpc'}))
        self.assertEquals(echo, 'foobar')

        self.respdata='{"result":null,"error":"MethodNotFound","id":""}'
        try:
            s.echo("foobar")
        except jsonrpc.JSONRPCException,e:
            self.assertEquals(e.error, "MethodNotFound")
@@ -0,0 +1,153 @@

"""
Copyright (c) 2007 Jan-Klaas Kollhof

This file is part of jsonrpc.

jsonrpc is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.

This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public License
along with this software; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""



import unittest
import jsonrpc
from types import *


class Service(object):
    @jsonrpc.ServiceMethod
    def echo(self, arg):
        return arg

    def not_a_serviceMethod(self):
        pass

    @jsonrpc.ServiceMethod
    def raiseError(self):
        raise Exception("foobar")

class Handler(jsonrpc.ServiceHandler):
    def __init__(self, service):
        self.service=service

    def translateRequest(self, data):
        self._requestTranslated=True
        return jsonrpc.ServiceHandler.translateRequest(self, data)

    def findServiceEndpoint(self, name):
        self._foundServiceEndpoint=True
        return jsonrpc.ServiceHandler.findServiceEndpoint(self, name)

    def invokeServiceEndpoint(self, meth, params):
        self._invokedEndpoint=True
        return jsonrpc.ServiceHandler.invokeServiceEndpoint(self, meth, params)

    def translateResult(self, result, error, id_):
        self._resultTranslated=True
        return jsonrpc.ServiceHandler.translateResult(self, result, error, id_)



class TestServiceHandler(unittest.TestCase):

    def setUp(self):
        self.service = Service()

    def tearDown(self):
        pass

    def test_RequestProcessing(self):
        handler = Handler(self.service)
        json=jsonrpc.dumps({"method":"echo", 'params':['foobar'], 'id':''})

        result = handler.handleRequest(json)
        self.assert_(handler._requestTranslated)
        self.assert_(handler._foundServiceEndpoint)
        self.assert_(handler._invokedEndpoint)
        self.assert_(handler._resultTranslated)

    def test_translateRequest(self):
        handler = Handler(self.service)
        json=jsonrpc.dumps({"method":"echo", 'params':['foobar'], 'id':''})
        req = handler.translateRequest(json)
        self.assertEquals(req['method'], "echo")
        self.assertEquals(req['params'],['foobar'])
        self.assertEquals(req['id'],'')

    def test_findServiceEndpoint(self):
        handler = Handler(self.service)
        self.assertRaises(jsonrpc.ServiceMethodNotFound, handler.findServiceEndpoint, "notfound")
        self.assertRaises(jsonrpc.ServiceMethodNotFound, handler.findServiceEndpoint, "not_a_serviceMethod")
        meth = handler.findServiceEndpoint("echo")
        self.assertEquals(self.service.echo, meth)

    def test_invokeEndpoint(self):
        handler = Handler(self.service)
        meth = handler.findServiceEndpoint("echo")
        rslt = handler.invokeServiceEndpoint(meth, ['spam'])
        self.assertEquals(rslt, 'spam')

    def test_translateResults(self):
        handler=Handler(self.service)
        data=handler.translateResult("foobar", None, "spam")
        self.assertEquals(jsonrpc.loads(data), {"result":"foobar","id":"spam","error":None})

    def test_translateError(self):
        handler=Handler(self.service)
        exc = Exception()
        data=handler.translateResult(None, exc, "id")
        self.assertEquals(jsonrpc.loads(data), {"result":None,"id":"id","error":{"name":"Exception", "message":""}})

    def test_translateUnencodableResults(self):
        handler=Handler(self.service)
        data=handler.translateResult(self, None, "spam")
        self.assertEquals(jsonrpc.loads(data), {"result":None,"id":"spam","error":{"name":"JSONEncodeException", "message":"Result Object Not Serializable"}})

    def test_handleRequestEcho(self):
        handler=Handler(self.service)
        json=jsonrpc.dumps({"method":"echo", 'params':['foobar'], 'id':''})
        result = handler.handleRequest(json)
        self.assertEquals(jsonrpc.loads(result), jsonrpc.loads('{"result":"foobar", "error":null, "id":""}'))

    def test_handleRequestMethodNotFound(self):
        handler=Handler(self.service)
        json=jsonrpc.dumps({"method":"not_found", 'params':['foobar'], 'id':''})
        result = handler.handleRequest(json)
        self.assertEquals(jsonrpc.loads(result), {"result":None, "error":{"name":"ServiceMethodNotFound", "message":""}, "id":""})

    def test_handleRequestMethodNotAllowed(self):
        handler=Handler(self.service)
        json=jsonrpc.dumps({"method":"not_a_ServiceMethod", 'params':['foobar'], 'id':''})
        result = handler.handleRequest(json)
        self.assertEquals(jsonrpc.loads(result), {"result":None, "error":{"name":"ServiceMethodNotFound", "message":""}, "id":""})

    def test_handleRequestMethodRaiseError(self):
        handler=Handler(self.service)
        json=jsonrpc.dumps({"method":"raiseError", 'params':[], 'id':''})
        result = handler.handleRequest(json)
        self.assertEquals(jsonrpc.loads(result), {"result":None, "error":{"name":"Exception", "message":"foobar"}, "id":""})

    def test_handleBadRequestData(self):
        handler=Handler(self.service)
        json = "This is not a JSON-RPC request"
        result = handler.handleRequest(json)
        self.assertEquals(jsonrpc.loads(result), {"result":None, "error":{"name":"ServiceRequestNotTranslatable", "message":json}, "id":""})

    def test_handleBadRequestObject(self):
        handler=Handler(self.service)
        json = "{}"
        result = handler.handleRequest(json)
        self.assertEquals(jsonrpc.loads(result), {"result":None, "error":{"name":"BadServiceRequest", "message":json}, "id":""})
45
icarus-miner/data/usr/lib/python2.6/jsonrpc/cgiwrapper.py
Normal file
@@ -0,0 +1,45 @@
import sys, os
from jsonrpc import ServiceHandler

class CGIServiceHandler(ServiceHandler):
    def __init__(self, service):
        if service == None:
            import __main__ as service

        ServiceHandler.__init__(self, service)

    def handleRequest(self, fin=None, fout=None, env=None):
        if fin==None:
            fin = sys.stdin
        if fout==None:
            fout = sys.stdout
        if env == None:
            env = os.environ

        try:
            contLen=int(env['CONTENT_LENGTH'])
            data = fin.read(contLen)
        except Exception, e:
            data = ""

        resultData = ServiceHandler.handleRequest(self, data)

        response = "Content-Type: text/plain\n"
        response += "Content-Length: %d\n\n" % len(resultData)
        response += resultData

        #on windows all \n are converted to \r\n if stdout is a terminal and is not set to binary mode :(
        #this will then cause an incorrect Content-length.
        #I have only experienced this problem with apache on Win so far.
        if sys.platform == "win32":
            try:
                import msvcrt
                msvcrt.setmode(fout.fileno(), os.O_BINARY)
            except:
                pass
        #put out the response
        fout.write(response)
        fout.flush()

def handleCGI(service=None, fin=None, fout=None, env=None):
    CGIServiceHandler(service).handleRequest(fin, fout, env)
BIN
icarus-miner/data/usr/lib/python2.6/jsonrpc/cgiwrapper.pyc
Normal file
Binary file not shown.
230
icarus-miner/data/usr/lib/python2.6/jsonrpc/json.py
Normal file
@@ -0,0 +1,230 @@

"""
Copyright (c) 2007 Jan-Klaas Kollhof

This file is part of jsonrpc.

jsonrpc is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.

This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public License
along with this software; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""

from types import *
import re

CharReplacements ={
    '\t': '\\t',
    '\b': '\\b',
    '\f': '\\f',
    '\n': '\\n',
    '\r': '\\r',
    '\\': '\\\\',
    '/': '\\/',
    '"': '\\"'}

EscapeCharToChar = {
    't': '\t',
    'b': '\b',
    'f': '\f',
    'n': '\n',
    'r': '\r',
    '\\': '\\',
    '/': '/',
    '"' : '"'}

StringEscapeRE= re.compile(r'[\x00-\x19\\"/\b\f\n\r\t]')
Digits = ['0', '1', '2','3','4','5','6','7','8','9']


class JSONEncodeException(Exception):
    def __init__(self, obj):
        Exception.__init__(self)
        self.obj = obj

    def __str__(self):
        return "Object not encodeable: %s" % self.obj


class JSONDecodeException(Exception):
    def __init__(self, message):
        Exception.__init__(self)
        self.message = message

    def __str__(self):
        return self.message


def escapeChar(match):
    c=match.group(0)
    try:
        replacement = CharReplacements[c]
        return replacement
    except KeyError:
        d = ord(c)
        if d < 32:
            return '\\u%04x' % d
        else:
            return c

def dumps(obj):
    return unicode("".join([part for part in dumpParts (obj)]))

def dumpParts (obj):
    objType = type(obj)
    if obj == None:
        yield u'null'
    elif objType is BooleanType:
        if obj:
            yield u'true'
        else:
            yield u'false'
    elif objType is DictionaryType:
        yield u'{'
        isFirst=True
        for (key, value) in obj.items():
            if isFirst:
                isFirst=False
            else:
                yield u","
            yield u'"' + StringEscapeRE.sub(escapeChar, key) +u'":'
            for part in dumpParts (value):
                yield part
        yield u'}'
    elif objType in StringTypes:
        yield u'"' + StringEscapeRE.sub(escapeChar, obj) +u'"'

    elif objType in [TupleType, ListType, GeneratorType]:
        yield u'['
        isFirst=True
        for item in obj:
            if isFirst:
                isFirst=False
            else:
                yield u","
            for part in dumpParts (item):
                yield part
        yield u']'
    elif objType in [IntType, LongType, FloatType]:
        yield unicode(obj)
    else:
        raise JSONEncodeException(obj)


def loads(s):
    stack = []
    chars = iter(s)
    value = None
    currCharIsNext=False

    try:
        while(1):
            skip = False
            if not currCharIsNext:
                c = chars.next()
            while(c in [' ', '\t', '\r','\n']):
                c = chars.next()
            currCharIsNext=False
            if c=='"':
                value = ''
                try:
                    c=chars.next()
                    while c != '"':
                        if c == '\\':
                            c=chars.next()
                            try:
                                value+=EscapeCharToChar[c]
                            except KeyError:
                                if c == 'u':
                                    hexCode = chars.next() + chars.next() + chars.next() + chars.next()
                                    value += unichr(int(hexCode,16))
                                else:
                                    raise JSONDecodeException("Bad Escape Sequence Found")
                        else:
                            value+=c
                        c=chars.next()
                except StopIteration:
                    raise JSONDecodeException("Expected end of String")
            elif c == '{':
                stack.append({})
                skip=True
            elif c =='}':
                value = stack.pop()
            elif c == '[':
                stack.append([])
                skip=True
            elif c == ']':
                value = stack.pop()
            elif c in [',',':']:
                skip=True
            elif c in Digits or c == '-':
                digits=[c]
                c = chars.next()
                numConv = int
                try:
                    while c in Digits:
                        digits.append(c)
                        c = chars.next()
                    if c == ".":
                        numConv=float
                        digits.append(c)
                        c = chars.next()
                        while c in Digits:
                            digits.append(c)
                            c = chars.next()
                        if c.upper() == 'E':
                            digits.append(c)
                            c = chars.next()
                            if c in ['+','-']:
                                digits.append(c)
                                c = chars.next()
                                while c in Digits:
                                    digits.append(c)
                                    c = chars.next()
                            else:
                                raise JSONDecodeException("Expected + or -")
                except StopIteration:
                    pass
                value = numConv("".join(digits))
                currCharIsNext=True

            elif c in ['t','f','n']:
                kw = c+ chars.next() + chars.next() + chars.next()
                if kw == 'null':
                    value = None
                elif kw == 'true':
                    value = True
                elif kw == 'fals' and chars.next() == 'e':
                    value = False
                else:
                    raise JSONDecodeException('Expected Null, False or True')
            else:
                raise JSONDecodeException('Expected []{}," or Number, Null, False or True')

            if not skip:
                if len(stack):
                    top = stack[-1]
                    if type(top) is ListType:
                        top.append(value)
                    elif type(top) is DictionaryType:
                        stack.append(value)
                    elif type(top) in StringTypes:
                        key = stack.pop()
                        stack[-1][key] = value
                    else:
                        raise JSONDecodeException("Expected dictionary key, or start of a value")
                else:
                    return value
    except StopIteration:
        raise JSONDecodeException("Unexpected end of JSON source")
BIN
icarus-miner/data/usr/lib/python2.6/jsonrpc/json.pyc
Normal file
Binary file not shown.
52
icarus-miner/data/usr/lib/python2.6/jsonrpc/modpywrapper.py
Normal file
@@ -0,0 +1,52 @@
import sys, os
from jsonrpc import ServiceHandler, ServiceException


class ServiceImplementaionNotFound(ServiceException):
    pass


class ModPyServiceHandler(ServiceHandler):
    def __init__(self, req):
        self.req = req
        ServiceHandler.__init__(self, None)


    def findServiceEndpoint(self, name):
        req = self.req

        (modulePath, fileName) = os.path.split(req.filename)
        (moduleName, ext) = os.path.splitext(fileName)

        if not os.path.exists(os.path.join(modulePath, moduleName + ".py")):
            raise ServiceImplementaionNotFound()
        else:
            if not modulePath in sys.path:
                sys.path.insert(0, modulePath)

            from mod_python import apache
            module = apache.import_module(moduleName, log=1)

            if hasattr(module, "service"):
                self.service = module.service
            elif hasattr(module, "Service"):
                self.service = module.Service()
            else:
                self.service = module

        return ServiceHandler.findServiceEndpoint(self, name)


    def handleRequest(self, data):
        self.req.content_type = "text/plain"
        data = self.req.read()
        resultData = ServiceHandler.handleRequest(self, data)
        self.req.write(resultData)
        self.req.flush()

def handler(req):
    from mod_python import apache
    ModPyServiceHandler(req).handleRequest(req)
    return apache.OK
BIN
icarus-miner/data/usr/lib/python2.6/jsonrpc/modpywrapper.pyc
Normal file
Binary file not shown.
49
icarus-miner/data/usr/lib/python2.6/jsonrpc/proxy.py
Normal file
@@ -0,0 +1,49 @@

"""
Copyright (c) 2007 Jan-Klaas Kollhof

This file is part of jsonrpc.

jsonrpc is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.

This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public License
along with this software; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""

import urllib
from jsonrpc.json import dumps, loads

class JSONRPCException(Exception):
    def __init__(self, rpcError):
        Exception.__init__(self)
        self.error = rpcError

class ServiceProxy(object):
    def __init__(self, serviceURL, serviceName=None):
        self.__serviceURL = serviceURL
        self.__serviceName = serviceName

    def __getattr__(self, name):
        if self.__serviceName != None:
            name = "%s.%s" % (self.__serviceName, name)
        return ServiceProxy(self.__serviceURL, name)

    def __call__(self, *args):
        postdata = dumps({"method": self.__serviceName, 'params': args, 'id':'jsonrpc'})
        respdata = urllib.urlopen(self.__serviceURL, postdata).read()
        resp = loads(respdata)
        if resp['error'] != None:
            raise JSONRPCException(resp['error'])
        else:
            return resp['result']
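For reference, a minimal sketch of how the miner script uses this proxy (the URL, credentials and port below are placeholders): each attribute access builds a method name, and each call POSTs a JSON-RPC request, returning 'result' or raising JSONRPCException on a non-null 'error':

from jsonrpc import ServiceProxy, JSONRPCException

# placeholder bitcoind/pool endpoint; substitute real credentials
bitcoind = ServiceProxy("http://user:password@127.0.0.1:8332")
try:
    work = bitcoind.getwork()    # sends {"method": "getwork", "params": [], "id": "jsonrpc"}
except JSONRPCException, e:
    print "RPC error:", e.error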
BIN
icarus-miner/data/usr/lib/python2.6/jsonrpc/proxy.pyc
Normal file
Binary file not shown.
113
icarus-miner/data/usr/lib/python2.6/jsonrpc/serviceHandler.py
Normal file
@@ -0,0 +1,113 @@

"""
Copyright (c) 2007 Jan-Klaas Kollhof

This file is part of jsonrpc.

jsonrpc is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.

This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public License
along with this software; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""

from jsonrpc import loads, dumps, JSONEncodeException


def ServiceMethod(fn):
    fn.IsServiceMethod = True
    return fn

class ServiceException(Exception):
    pass

class ServiceRequestNotTranslatable(ServiceException):
    pass

class BadServiceRequest(ServiceException):
    pass

class ServiceMethodNotFound(ServiceException):
    def __init__(self, name):
        self.methodName=name

class ServiceHandler(object):

    def __init__(self, service):
        self.service=service

    def handleRequest(self, json):
        err=None
        result = None
        id_=''

        try:
            req = self.translateRequest(json)
        except ServiceRequestNotTranslatable, e:
            err = e
            req={'id':id_}

        if err==None:
            try:
                id_ = req['id']
                methName = req['method']
                args = req['params']
            except:
                err = BadServiceRequest(json)

        if err == None:
            try:
                meth = self.findServiceEndpoint(methName)
            except Exception, e:
                err = e

        if err == None:
            try:
                result = self.invokeServiceEndpoint(meth, args)
            except Exception, e:
                err = e

        resultdata = self.translateResult(result, err, id_)

        return resultdata

    def translateRequest(self, data):
        try:
            req = loads(data)
        except:
            raise ServiceRequestNotTranslatable(data)
        return req

    def findServiceEndpoint(self, name):
        try:
            meth = getattr(self.service, name)
            if getattr(meth, "IsServiceMethod"):
                return meth
            else:
                raise ServiceMethodNotFound(name)
        except AttributeError:
            raise ServiceMethodNotFound(name)

    def invokeServiceEndpoint(self, meth, args):
        return meth(*args)

    def translateResult(self, rslt, err, id_):
        if err != None:
            err = {"name": err.__class__.__name__, "message":err.message}
            rslt = None

        try:
            data = dumps({"result":rslt,"id":id_,"error":err})
        except JSONEncodeException, e:
            err = {"name": "JSONEncodeException", "message":"Result Object Not Serializable"}
            data = dumps({"result":None, "id":id_,"error":err})

        return data
BIN
icarus-miner/data/usr/lib/python2.6/jsonrpc/serviceHandler.pyc
Normal file
Binary file not shown.
93
icarus-miner/data/usr/lib/python2.6/keyword.py
Executable file
@@ -0,0 +1,93 @@
#! /usr/bin/env python

"""Keywords (from "graminit.c")

This file is automatically generated; please don't muck it up!

To update the symbols in this file, 'cd' to the top directory of
the python source tree after building the interpreter and run:

    python Lib/keyword.py
"""

__all__ = ["iskeyword", "kwlist"]

kwlist = [
#--start keywords--
        'and',
        'as',
        'assert',
        'break',
        'class',
        'continue',
        'def',
        'del',
        'elif',
        'else',
        'except',
        'exec',
        'finally',
        'for',
        'from',
        'global',
        'if',
        'import',
        'in',
        'is',
        'lambda',
        'not',
        'or',
        'pass',
        'print',
        'raise',
        'return',
        'try',
        'while',
        'with',
        'yield',
#--end keywords--
        ]

iskeyword = frozenset(kwlist).__contains__

def main():
    import sys, re

    args = sys.argv[1:]
    iptfile = args and args[0] or "Python/graminit.c"
    if len(args) > 1: optfile = args[1]
    else: optfile = "Lib/keyword.py"

    # scan the source file for keywords
    fp = open(iptfile)
    strprog = re.compile('"([^"]+)"')
    lines = []
    for line in fp:
        if '{1, "' in line:
            match = strprog.search(line)
            if match:
                lines.append("        '" + match.group(1) + "',\n")
    fp.close()
    lines.sort()

    # load the output skeleton from the target
    fp = open(optfile)
    format = fp.readlines()
    fp.close()

    # insert the lines of keywords
    try:
        start = format.index("#--start keywords--\n") + 1
        end = format.index("#--end keywords--\n")
        format[start:end] = lines
    except ValueError:
        sys.stderr.write("target does not contain format markers\n")
        sys.exit(1)

    # write the output file
    fp = open(optfile, 'w')
    fp.write(''.join(format))
    fp.close()

if __name__ == "__main__":
    main()
BIN
icarus-miner/data/usr/lib/python2.6/keyword.pyc
Normal file
Binary file not shown.
128
icarus-miner/data/usr/lib/python2.6/lib-dynload/__future__.py
Normal file
@@ -0,0 +1,128 @@
"""Record of phased-in incompatible language changes.

Each line is of the form:

    FeatureName = "_Feature(" OptionalRelease "," MandatoryRelease ","
                              CompilerFlag ")"

where, normally, OptionalRelease < MandatoryRelease, and both are 5-tuples
of the same form as sys.version_info:

    (PY_MAJOR_VERSION, # the 2 in 2.1.0a3; an int
     PY_MINOR_VERSION, # the 1; an int
     PY_MICRO_VERSION, # the 0; an int
     PY_RELEASE_LEVEL, # "alpha", "beta", "candidate" or "final"; string
     PY_RELEASE_SERIAL # the 3; an int
    )

OptionalRelease records the first release in which

    from __future__ import FeatureName

was accepted.

In the case of MandatoryReleases that have not yet occurred,
MandatoryRelease predicts the release in which the feature will become part
of the language.

Else MandatoryRelease records when the feature became part of the language;
in releases at or after that, modules no longer need

    from __future__ import FeatureName

to use the feature in question, but may continue to use such imports.

MandatoryRelease may also be None, meaning that a planned feature got
dropped.

Instances of class _Feature have two corresponding methods,
.getOptionalRelease() and .getMandatoryRelease().

CompilerFlag is the (bitfield) flag that should be passed in the fourth
argument to the builtin function compile() to enable the feature in
dynamically compiled code.  This flag is stored in the .compiler_flag
attribute on _Future instances.  These values must match the appropriate
#defines of CO_xxx flags in Include/compile.h.

No feature line is ever to be deleted from this file.
"""

all_feature_names = [
    "nested_scopes",
    "generators",
    "division",
    "absolute_import",
    "with_statement",
    "print_function",
    "unicode_literals",
]

__all__ = ["all_feature_names"] + all_feature_names

# The CO_xxx symbols are defined here under the same names used by
# compile.h, so that an editor search will find them here.  However,
# they're not exported in __all__, because they don't really belong to
# this module.
CO_NESTED = 0x0010                      # nested_scopes
CO_GENERATOR_ALLOWED = 0                # generators (obsolete, was 0x1000)
CO_FUTURE_DIVISION = 0x2000             # division
CO_FUTURE_ABSOLUTE_IMPORT = 0x4000      # perform absolute imports by default
CO_FUTURE_WITH_STATEMENT = 0x8000       # with statement
CO_FUTURE_PRINT_FUNCTION = 0x10000      # print function
CO_FUTURE_UNICODE_LITERALS = 0x20000    # unicode string literals

class _Feature:
    def __init__(self, optionalRelease, mandatoryRelease, compiler_flag):
        self.optional = optionalRelease
        self.mandatory = mandatoryRelease
        self.compiler_flag = compiler_flag

    def getOptionalRelease(self):
        """Return first release in which this feature was recognized.

        This is a 5-tuple, of the same form as sys.version_info.
        """

        return self.optional

    def getMandatoryRelease(self):
        """Return release in which this feature will become mandatory.

        This is a 5-tuple, of the same form as sys.version_info, or, if
        the feature was dropped, is None.
        """

        return self.mandatory

    def __repr__(self):
        return "_Feature" + repr((self.optional,
                                  self.mandatory,
                                  self.compiler_flag))

nested_scopes = _Feature((2, 1, 0, "beta", 1),
                         (2, 2, 0, "alpha", 0),
                         CO_NESTED)

generators = _Feature((2, 2, 0, "alpha", 1),
                      (2, 3, 0, "final", 0),
                      CO_GENERATOR_ALLOWED)

division = _Feature((2, 2, 0, "alpha", 2),
                    (3, 0, 0, "alpha", 0),
                    CO_FUTURE_DIVISION)

absolute_import = _Feature((2, 5, 0, "alpha", 1),
                           (2, 7, 0, "alpha", 0),
                           CO_FUTURE_ABSOLUTE_IMPORT)

with_statement = _Feature((2, 5, 0, "alpha", 1),
                          (2, 6, 0, "alpha", 0),
                          CO_FUTURE_WITH_STATEMENT)

print_function = _Feature((2, 6, 0, "alpha", 2),
                          (3, 0, 0, "alpha", 0),
                          CO_FUTURE_PRINT_FUNCTION)

unicode_literals = _Feature((2, 6, 0, "alpha", 2),
                            (3, 0, 0, "alpha", 0),
                            CO_FUTURE_UNICODE_LITERALS)
BIN
icarus-miner/data/usr/lib/python2.6/lib-dynload/__future__.pyc
Normal file
Binary file not shown.
BIN
icarus-miner/data/usr/lib/python2.6/lib-dynload/_bisect.so
Executable file
Binary file not shown.
BIN
icarus-miner/data/usr/lib/python2.6/lib-dynload/_collections.so
Executable file
Binary file not shown.
BIN
icarus-miner/data/usr/lib/python2.6/lib-dynload/_functools.so
Executable file
Binary file not shown.
BIN
icarus-miner/data/usr/lib/python2.6/lib-dynload/_random.so
Executable file
Binary file not shown.
BIN
icarus-miner/data/usr/lib/python2.6/lib-dynload/_socket.so
Executable file
Binary file not shown.
BIN
icarus-miner/data/usr/lib/python2.6/lib-dynload/_struct.so
Executable file
Binary file not shown.
BIN
icarus-miner/data/usr/lib/python2.6/lib-dynload/array.so
Executable file
Binary file not shown.
BIN
icarus-miner/data/usr/lib/python2.6/lib-dynload/binascii.so
Executable file
Binary file not shown.
BIN
icarus-miner/data/usr/lib/python2.6/lib-dynload/fcntl.so
Executable file
Binary file not shown.
BIN
icarus-miner/data/usr/lib/python2.6/lib-dynload/itertools.so
Executable file
Binary file not shown.
BIN
icarus-miner/data/usr/lib/python2.6/lib-dynload/math.so
Executable file
Binary file not shown.
BIN
icarus-miner/data/usr/lib/python2.6/lib-dynload/operator.so
Executable file
Binary file not shown.
898
icarus-miner/data/usr/lib/python2.6/lib-dynload/random.py
Normal file
@@ -0,0 +1,898 @@
"""Random variable generators.

    integers
    --------
           uniform within range

    sequences
    ---------
           pick random element
           pick random sample
           generate random permutation

    distributions on the real line:
    ------------------------------
           uniform
           triangular
           normal (Gaussian)
           lognormal
           negative exponential
           gamma
           beta
           pareto
           Weibull

    distributions on the circle (angles 0 to 2pi)
    ---------------------------------------------
           circular uniform
           von Mises

General notes on the underlying Mersenne Twister core generator:

* The period is 2**19937-1.
* It is one of the most extensively tested generators in existence.
* Without a direct way to compute N steps forward, the semantics of
  jumpahead(n) are weakened to simply jump to another distant state and rely
  on the large period to avoid overlapping sequences.
* The random() method is implemented in C, executes in a single Python step,
  and is, therefore, threadsafe.

"""

from __future__ import division
from warnings import warn as _warn
from types import MethodType as _MethodType, BuiltinMethodType as _BuiltinMethodType
from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil
from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin
from os import urandom as _urandom
from binascii import hexlify as _hexlify

__all__ = ["Random","seed","random","uniform","randint","choice","sample",
           "randrange","shuffle","normalvariate","lognormvariate",
           "expovariate","vonmisesvariate","gammavariate","triangular",
           "gauss","betavariate","paretovariate","weibullvariate",
           "getstate","setstate","jumpahead", "WichmannHill", "getrandbits",
           "SystemRandom"]

NV_MAGICCONST = 4 * _exp(-0.5)/_sqrt(2.0)
TWOPI = 2.0*_pi
LOG4 = _log(4.0)
SG_MAGICCONST = 1.0 + _log(4.5)
BPF = 53        # Number of bits in a float
RECIP_BPF = 2**-BPF


# Translated by Guido van Rossum from C source provided by
# Adrian Baddeley.  Adapted by Raymond Hettinger for use with
# the Mersenne Twister and os.urandom() core generators.

import _random

class Random(_random.Random):
    """Random number generator base class used by bound module functions.

    Used to instantiate instances of Random to get generators that don't
    share state.  Especially useful for multi-threaded programs, creating
    a different instance of Random for each thread, and using the jumpahead()
    method to ensure that the generated sequences seen by each thread don't
    overlap.

    Class Random can also be subclassed if you want to use a different basic
    generator of your own devising: in that case, override the following
    methods: random(), seed(), getstate(), setstate() and jumpahead().
    Optionally, implement a getrandbits() method so that randrange() can cover
    arbitrarily large ranges.

    """

    VERSION = 3     # used by getstate/setstate

    def __init__(self, x=None):
        """Initialize an instance.

        Optional argument x controls seeding, as for Random.seed().
        """

        self.seed(x)
        self.gauss_next = None

    def seed(self, a=None):
        """Initialize internal state from hashable object.

        None or no argument seeds from current time or from an operating
        system specific randomness source if available.

        If a is not None or an int or long, hash(a) is used instead.
        """

        if a is None:
            try:
                a = long(_hexlify(_urandom(16)), 16)
            except NotImplementedError:
                import time
                a = long(time.time() * 256) # use fractional seconds

        super(Random, self).seed(a)
        self.gauss_next = None

    def getstate(self):
        """Return internal state; can be passed to setstate() later."""
        return self.VERSION, super(Random, self).getstate(), self.gauss_next

    def setstate(self, state):
        """Restore internal state from object returned by getstate()."""
        version = state[0]
        if version == 3:
            version, internalstate, self.gauss_next = state
            super(Random, self).setstate(internalstate)
        elif version == 2:
            version, internalstate, self.gauss_next = state
            # In version 2, the state was saved as signed ints, which causes
            # inconsistencies between 32/64-bit systems.  The state is
            # really unsigned 32-bit ints, so we convert negative ints from
            # version 2 to positive longs for version 3.
            try:
                internalstate = tuple( long(x) % (2**32) for x in internalstate )
            except ValueError, e:
                raise TypeError, e
            super(Random, self).setstate(internalstate)
        else:
            raise ValueError("state with version %s passed to "
                             "Random.setstate() of version %s" %
                             (version, self.VERSION))

    ## ---- Methods below this point do not need to be overridden when
    ## ---- subclassing for the purpose of using a different core generator.

    ## -------------------- pickle support  -------------------

    def __getstate__(self): # for pickle
        return self.getstate()

    def __setstate__(self, state):  # for pickle
        self.setstate(state)

    def __reduce__(self):
        return self.__class__, (), self.getstate()

    ## -------------------- integer methods  -------------------

    def randrange(self, start, stop=None, step=1, int=int, default=None,
                  maxwidth=1L<<BPF):
        """Choose a random item from range(start, stop[, step]).

        This fixes the problem with randint() which includes the
        endpoint; in Python this is usually not what you want.
        Do not supply the 'int', 'default', and 'maxwidth' arguments.
        """

        # This code is a bit messy to make it fast for the
        # common case while still doing adequate error checking.
        istart = int(start)
        if istart != start:
            raise ValueError, "non-integer arg 1 for randrange()"
        if stop is default:
            if istart > 0:
                if istart >= maxwidth:
                    return self._randbelow(istart)
                return int(self.random() * istart)
            raise ValueError, "empty range for randrange()"

        # stop argument supplied.
        istop = int(stop)
        if istop != stop:
            raise ValueError, "non-integer stop for randrange()"
        width = istop - istart
        if step == 1 and width > 0:
            # Note that
            #     int(istart + self.random()*width)
            # instead would be incorrect.  For example, consider istart
            # = -2 and istop = 0.  Then the guts would be in
            # -2.0 to 0.0 exclusive on both ends (ignoring that random()
            # might return 0.0), and because int() truncates toward 0, the
            # final result would be -1 or 0 (instead of -2 or -1).
            #     istart + int(self.random()*width)
            # would also be incorrect, for a subtler reason:  the RHS
            # can return a long, and then randrange() would also return
            # a long, but we're supposed to return an int (for backward
            # compatibility).

            if width >= maxwidth:
                return int(istart + self._randbelow(width))
            return int(istart + int(self.random()*width))
        if step == 1:
            raise ValueError, "empty range for randrange() (%d,%d, %d)" % (istart, istop, width)

        # Non-unit step argument supplied.
        istep = int(step)
        if istep != step:
            raise ValueError, "non-integer step for randrange()"
        if istep > 0:
            n = (width + istep - 1) // istep
        elif istep < 0:
            n = (width + istep + 1) // istep
        else:
            raise ValueError, "zero step for randrange()"

        if n <= 0:
            raise ValueError, "empty range for randrange()"

        if n >= maxwidth:
            return istart + istep*self._randbelow(n)
        return istart + istep*int(self.random() * n)

    def randint(self, a, b):
        """Return random integer in range [a, b], including both end points.
        """

        return self.randrange(a, b+1)

    def _randbelow(self, n, _log=_log, int=int, _maxwidth=1L<<BPF,
                   _Method=_MethodType, _BuiltinMethod=_BuiltinMethodType):
        """Return a random int in the range [0,n)

        Handles the case where n has more bits than returned
        by a single call to the underlying generator.
        """

        try:
            getrandbits = self.getrandbits
        except AttributeError:
            pass
        else:
            # Only call self.getrandbits if the original random() builtin method
            # has not been overridden or if a new getrandbits() was supplied.
            # This assures that the two methods correspond.
            if type(self.random) is _BuiltinMethod or type(getrandbits) is _Method:
                k = int(1.00001 + _log(n-1, 2.0))   # 2**k > n-1 > 2**(k-2)
                r = getrandbits(k)
                while r >= n:
                    r = getrandbits(k)
                return r
        if n >= _maxwidth:
            _warn("Underlying random() generator does not supply \n"
                  "enough bits to choose from a population range this large")
        return int(self.random() * n)

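A minimal sketch (not part of the diff) of the endpoint semantics described in the docstrings above: randrange() excludes the stop value while randint() includes both ends, and a non-unit step restricts the candidates.

# Sketch only; all names are standard random-module API.
from random import Random

rng = Random(42)                    # deterministic seed for the example
assert all(0 <= rng.randrange(10) <= 9 for _ in xrange(1000))
assert all(1 <= rng.randint(1, 6) <= 6 for _ in xrange(1000))
assert rng.randrange(0, 10, 2) % 2 == 0   # step argument: even values only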
    ## -------------------- sequence methods  -------------------

    def choice(self, seq):
        """Choose a random element from a non-empty sequence."""
        return seq[int(self.random() * len(seq))]  # raises IndexError if seq is empty

    def shuffle(self, x, random=None, int=int):
        """x, random=random.random -> shuffle list x in place; return None.

        Optional arg random is a 0-argument function returning a random
        float in [0.0, 1.0); by default, the standard random.random.
        """

        if random is None:
            random = self.random
        for i in reversed(xrange(1, len(x))):
            # pick an element in x[:i+1] with which to exchange x[i]
            j = int(random() * (i+1))
            x[i], x[j] = x[j], x[i]

    def sample(self, population, k):
        """Chooses k unique random elements from a population sequence.

        Returns a new list containing elements from the population while
        leaving the original population unchanged.  The resulting list is
        in selection order so that all sub-slices will also be valid random
        samples.  This allows raffle winners (the sample) to be partitioned
        into grand prize and second place winners (the subslices).

        Members of the population need not be hashable or unique.  If the
        population contains repeats, then each occurrence is a possible
        selection in the sample.

        To choose a sample in a range of integers, use xrange as an argument.
        This is especially fast and space efficient for sampling from a
        large population:   sample(xrange(10000000), 60)
        """

        # XXX Although the documentation says `population` is "a sequence",
        # XXX attempts are made to cater to any iterable with a __len__
        # XXX method.  This has had mixed success.  Examples from both
        # XXX sides:  sets work fine, and should become officially supported;
        # XXX dicts are much harder, and have failed in various subtle
        # XXX ways across attempts.  Support for mapping types should probably
        # XXX be dropped (and users should pass mapping.keys() or .values()
        # XXX explicitly).

        # Sampling without replacement entails tracking either potential
        # selections (the pool) in a list or previous selections in a set.

        # When the number of selections is small compared to the
        # population, then tracking selections is efficient, requiring
        # only a small set and an occasional reselection.  For
        # a larger number of selections, the pool tracking method is
        # preferred since the list takes less space than the
        # set and it doesn't suffer from frequent reselections.

        n = len(population)
        if not 0 <= k <= n:
            raise ValueError, "sample larger than population"
        random = self.random
        _int = int
        result = [None] * k
        setsize = 21        # size of a small set minus size of an empty list
        if k > 5:
            setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets
        if n <= setsize or hasattr(population, "keys"):
            # An n-length list is smaller than a k-length set, or this is a
            # mapping type so the other algorithm wouldn't work.
            pool = list(population)
            for i in xrange(k):         # invariant:  non-selected at [0,n-i)
                j = _int(random() * (n-i))
                result[i] = pool[j]
                pool[j] = pool[n-i-1]   # move non-selected item into vacancy
        else:
            try:
                selected = set()
                selected_add = selected.add
                for i in xrange(k):
                    j = _int(random() * n)
                    while j in selected:
                        j = _int(random() * n)
                    selected_add(j)
                    result[i] = population[j]
            except (TypeError, KeyError):   # handle (at least) sets
                if isinstance(population, list):
                    raise
                return self.sample(tuple(population), k)
        return result

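A short sketch of the sampling behaviour the docstring promises: the xrange trick samples a huge range cheaply, and sub-slices of the result are themselves valid random samples. All names are standard random-module API; the values are illustrative.

from random import Random

rng = Random(1)
winners = rng.sample(xrange(10000000), 60)   # 60 unique ints, little memory
grand, second = winners[:10], winners[10:]   # partition stays unbiased

deck = range(52)
rng.shuffle(deck)                            # in-place Fisher-Yates shuffle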
    ## -------------------- real-valued distributions  -------------------

    ## -------------------- uniform distribution -------------------

    def uniform(self, a, b):
        "Get a random number in the range [a, b) or [a, b] depending on rounding."
        return a + (b-a) * self.random()

    ## -------------------- triangular --------------------

    def triangular(self, low=0.0, high=1.0, mode=None):
        """Triangular distribution.

        Continuous distribution bounded by given lower and upper limits,
        and having a given mode value in-between.

        http://en.wikipedia.org/wiki/Triangular_distribution

        """
        u = self.random()
        c = 0.5 if mode is None else (mode - low) / (high - low)
        if u > c:
            u = 1.0 - u
            c = 1.0 - c
            low, high = high, low
        return low + (high - low) * (u * c) ** 0.5

    ## -------------------- normal distribution --------------------

    def normalvariate(self, mu, sigma):
        """Normal distribution.

        mu is the mean, and sigma is the standard deviation.

        """
        # mu = mean, sigma = standard deviation

        # Uses Kinderman and Monahan method. Reference: Kinderman,
        # A.J. and Monahan, J.F., "Computer generation of random
        # variables using the ratio of uniform deviates", ACM Trans
        # Math Software, 3, (1977), pp257-260.

        random = self.random
        while 1:
            u1 = random()
            u2 = 1.0 - random()
            z = NV_MAGICCONST*(u1-0.5)/u2
            zz = z*z/4.0
            if zz <= -_log(u2):
                break
        return mu + z*sigma

    ## -------------------- lognormal distribution --------------------

    def lognormvariate(self, mu, sigma):
        """Log normal distribution.

        If you take the natural logarithm of this distribution, you'll get a
        normal distribution with mean mu and standard deviation sigma.
        mu can have any value, and sigma must be greater than zero.

        """
        return _exp(self.normalvariate(mu, sigma))

    ## -------------------- exponential distribution --------------------

    def expovariate(self, lambd):
        """Exponential distribution.

        lambd is 1.0 divided by the desired mean.  It should be
        nonzero.  (The parameter would be called "lambda", but that is
        a reserved word in Python.)  Returned values range from 0 to
        positive infinity if lambd is positive, and from negative
        infinity to 0 if lambd is negative.

        """
        # lambd: rate lambd = 1/mean
        # ('lambda' is a Python reserved word)

        random = self.random
        u = random()
        while u <= 1e-7:
            u = random()
        return -_log(u)/lambd

    ## -------------------- von Mises distribution --------------------

    def vonmisesvariate(self, mu, kappa):
        """Circular data distribution.

        mu is the mean angle, expressed in radians between 0 and 2*pi, and
        kappa is the concentration parameter, which must be greater than or
        equal to zero.  If kappa is equal to zero, this distribution reduces
        to a uniform random angle over the range 0 to 2*pi.

        """
        # mu:    mean angle (in radians between 0 and 2*pi)
        # kappa: concentration parameter kappa (>= 0)
        # if kappa = 0 generate uniform random angle

        # Based upon an algorithm published in: Fisher, N.I.,
        # "Statistical Analysis of Circular Data", Cambridge
        # University Press, 1993.

        # Thanks to Magnus Kessler for a correction to the
        # implementation of step 4.

        random = self.random
        if kappa <= 1e-6:
            return TWOPI * random()

        a = 1.0 + _sqrt(1.0 + 4.0 * kappa * kappa)
        b = (a - _sqrt(2.0 * a))/(2.0 * kappa)
        r = (1.0 + b * b)/(2.0 * b)

        while 1:
            u1 = random()

            z = _cos(_pi * u1)
            f = (1.0 + r * z)/(r + z)
            c = kappa * (r - f)

            u2 = random()

            if u2 < c * (2.0 - c) or u2 <= c * _exp(1.0 - c):
                break

        u3 = random()
        if u3 > 0.5:
            theta = (mu % TWOPI) + _acos(f)
        else:
            theta = (mu % TWOPI) - _acos(f)

        return theta

    ## -------------------- gamma distribution --------------------

    def gammavariate(self, alpha, beta):
        """Gamma distribution.  Not the gamma function!

        Conditions on the parameters are alpha > 0 and beta > 0.

        """

        # alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2

        # Warning: a few older sources define the gamma distribution in terms
        # of alpha > -1.0
        if alpha <= 0.0 or beta <= 0.0:
            raise ValueError, 'gammavariate: alpha and beta must be > 0.0'

        random = self.random
        if alpha > 1.0:

            # Uses R.C.H. Cheng, "The generation of Gamma
            # variables with non-integral shape parameters",
            # Applied Statistics, (1977), 26, No. 1, p71-74

            ainv = _sqrt(2.0 * alpha - 1.0)
            bbb = alpha - LOG4
            ccc = alpha + ainv

            while 1:
                u1 = random()
                if not 1e-7 < u1 < .9999999:
                    continue
                u2 = 1.0 - random()
                v = _log(u1/(1.0-u1))/ainv
                x = alpha*_exp(v)
                z = u1*u1*u2
                r = bbb+ccc*v-x
                if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z):
                    return x * beta

        elif alpha == 1.0:
            # expovariate(1)
            u = random()
            while u <= 1e-7:
                u = random()
            return -_log(u) * beta

        else:   # alpha is between 0 and 1 (exclusive)

            # Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle

            while 1:
                u = random()
                b = (_e + alpha)/_e
                p = b*u
                if p <= 1.0:
                    x = p ** (1.0/alpha)
                else:
                    x = -_log((b-p)/alpha)
                u1 = random()
                if p > 1.0:
                    if u1 <= x ** (alpha - 1.0):
                        break
                elif u1 <= _exp(-x):
                    break
            return x * beta

    ## -------------------- Gauss (faster alternative) --------------------

    def gauss(self, mu, sigma):
        """Gaussian distribution.

        mu is the mean, and sigma is the standard deviation.  This is
        slightly faster than the normalvariate() function.

        Not thread-safe without a lock around calls.

        """

        # When x and y are two variables from [0, 1), uniformly
        # distributed, then
        #
        #    cos(2*pi*x)*sqrt(-2*log(1-y))
        #    sin(2*pi*x)*sqrt(-2*log(1-y))
        #
        # are two *independent* variables with normal distribution
        # (mu = 0, sigma = 1).
        # (Lambert Meertens)
        # (corrected version; bug discovered by Mike Miller, fixed by LM)

        # Multithreading note: When two threads call this function
        # simultaneously, it is possible that they will receive the
        # same return value.  The window is very small though.  To
        # avoid this, you have to use a lock around all calls.  (I
        # didn't want to slow this down in the serial case by using a
        # lock here.)

        random = self.random
        z = self.gauss_next
        self.gauss_next = None
        if z is None:
            x2pi = random() * TWOPI
            g2rad = _sqrt(-2.0 * _log(1.0 - random()))
            z = _cos(x2pi) * g2rad
            self.gauss_next = _sin(x2pi) * g2rad

        return mu + z*sigma

    ## -------------------- beta --------------------
    ## See
    ## http://sourceforge.net/bugs/?func=detailbug&bug_id=130030&group_id=5470
    ## for Ivan Frohne's insightful analysis of why the original implementation:
    ##
    ##    def betavariate(self, alpha, beta):
    ##        # Discrete Event Simulation in C, pp 87-88.
    ##
    ##        y = self.expovariate(alpha)
    ##        z = self.expovariate(1.0/beta)
    ##        return z/(y+z)
    ##
    ## was dead wrong, and how it probably got that way.

    def betavariate(self, alpha, beta):
        """Beta distribution.

        Conditions on the parameters are alpha > 0 and beta > 0.
        Returned values range between 0 and 1.

        """

        # This version due to Janne Sinkkonen, and matches all the std
        # texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution").
        y = self.gammavariate(alpha, 1.)
        if y == 0:
            return 0.0
        else:
            return y / (y + self.gammavariate(beta, 1.))

    ## -------------------- Pareto --------------------

    def paretovariate(self, alpha):
        """Pareto distribution.  alpha is the shape parameter."""
        # Jain, pg. 495

        u = 1.0 - self.random()
        return 1.0 / pow(u, 1.0/alpha)

    ## -------------------- Weibull --------------------

    def weibullvariate(self, alpha, beta):
        """Weibull distribution.

        alpha is the scale parameter and beta is the shape parameter.

        """
        # Jain, pg. 499; bug fix courtesy Bill Arms

        u = 1.0 - self.random()
        return alpha * pow(-_log(u), 1.0/beta)

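For reference, a small sketch drawing from a few of the distributions above; the parameter values are illustrative only.

from random import Random

rng = Random(7)
delay = rng.expovariate(1.0/30)     # mean 30, e.g. for randomized retries
noise = rng.gauss(0.0, 1.0)         # standard normal deviate
share = rng.betavariate(3.0, 3.0)   # value in (0, 1), symmetric about 0.5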
## -------------------- Wichmann-Hill -------------------

class WichmannHill(Random):

    VERSION = 1     # used by getstate/setstate

    def seed(self, a=None):
        """Initialize internal state from hashable object.

        None or no argument seeds from current time or from an operating
        system specific randomness source if available.

        If a is not None or an int or long, hash(a) is used instead.

        If a is an int or long, a is used directly.  Distinct values between
        0 and 27814431486575L inclusive are guaranteed to yield distinct
        internal states (this guarantee is specific to the default
        Wichmann-Hill generator).
        """

        if a is None:
            try:
                a = long(_hexlify(_urandom(16)), 16)
            except NotImplementedError:
                import time
                a = long(time.time() * 256) # use fractional seconds

        if not isinstance(a, (int, long)):
            a = hash(a)

        a, x = divmod(a, 30268)
        a, y = divmod(a, 30306)
        a, z = divmod(a, 30322)
        self._seed = int(x)+1, int(y)+1, int(z)+1

        self.gauss_next = None

    def random(self):
        """Get the next random number in the range [0.0, 1.0)."""

        # Wichman-Hill random number generator.
        #
        # Wichmann, B. A. & Hill, I. D. (1982)
        # Algorithm AS 183:
        # An efficient and portable pseudo-random number generator
        # Applied Statistics 31 (1982) 188-190
        #
        # see also:
        #        Correction to Algorithm AS 183
        #        Applied Statistics 33 (1984) 123
        #
        #        McLeod, A. I. (1985)
        #        A remark on Algorithm AS 183
        #        Applied Statistics 34 (1985),198-200

        # This part is thread-unsafe:
        # BEGIN CRITICAL SECTION
        x, y, z = self._seed
        x = (171 * x) % 30269
        y = (172 * y) % 30307
        z = (170 * z) % 30323
        self._seed = x, y, z
        # END CRITICAL SECTION

        # Note:  on a platform using IEEE-754 double arithmetic, this can
        # never return 0.0 (asserted by Tim; proof too long for a comment).
        return (x/30269.0 + y/30307.0 + z/30323.0) % 1.0

    def getstate(self):
        """Return internal state; can be passed to setstate() later."""
        return self.VERSION, self._seed, self.gauss_next

    def setstate(self, state):
        """Restore internal state from object returned by getstate()."""
        version = state[0]
        if version == 1:
            version, self._seed, self.gauss_next = state
        else:
            raise ValueError("state with version %s passed to "
                             "Random.setstate() of version %s" %
                             (version, self.VERSION))

    def jumpahead(self, n):
        """Act as if n calls to random() were made, but quickly.

        n is an int, greater than or equal to 0.

        Example use:  If you have 2 threads and know that each will
        consume no more than a million random numbers, create two Random
        objects r1 and r2, then do
            r2.setstate(r1.getstate())
            r2.jumpahead(1000000)
        Then r1 and r2 will use guaranteed-disjoint segments of the full
        period.
        """

        if not n >= 0:
            raise ValueError("n must be >= 0")
        x, y, z = self._seed
        x = int(x * pow(171, n, 30269)) % 30269
        y = int(y * pow(172, n, 30307)) % 30307
        z = int(z * pow(170, n, 30323)) % 30323
        self._seed = x, y, z

    def __whseed(self, x=0, y=0, z=0):
        """Set the Wichmann-Hill seed from (x, y, z).

        These must be integers in the range [0, 256).
        """

        if not type(x) == type(y) == type(z) == int:
            raise TypeError('seeds must be integers')
        if not (0 <= x < 256 and 0 <= y < 256 and 0 <= z < 256):
            raise ValueError('seeds must be in range(0, 256)')
        if 0 == x == y == z:
            # Initialize from current time
            import time
            t = long(time.time() * 256)
            t = int((t&0xffffff) ^ (t>>24))
            t, x = divmod(t, 256)
            t, y = divmod(t, 256)
            t, z = divmod(t, 256)
        # Zero is a poor seed, so substitute 1
        self._seed = (x or 1, y or 1, z or 1)

        self.gauss_next = None

    def whseed(self, a=None):
        """Seed from hashable object's hash code.

        None or no argument seeds from current time.  It is not guaranteed
        that objects with distinct hash codes lead to distinct internal
        states.

        This is obsolete, provided for compatibility with the seed routine
        used prior to Python 2.1.  Use the .seed() method instead.
        """

        if a is None:
            self.__whseed()
            return
        a = hash(a)
        a, x = divmod(a, 256)
        a, y = divmod(a, 256)
        a, z = divmod(a, 256)
        x = (x + a) % 256 or 1
        y = (y + a) % 256 or 1
        z = (z + a) % 256 or 1
        self.__whseed(x, y, z)

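The jumpahead() docstring's recipe, written out as a runnable sketch: two generators start from the identical state, and the second skips a million draws ahead, so the streams are guaranteed disjoint.

from random import WichmannHill

r1 = WichmannHill(12345)
r2 = WichmannHill()
r2.setstate(r1.getstate())   # start from the identical state...
r2.jumpahead(1000000)        # ...then skip a million draws ahead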
## --------------- Operating System Random Source  ------------------

class SystemRandom(Random):
    """Alternate random number generator using sources provided
    by the operating system (such as /dev/urandom on Unix or
    CryptGenRandom on Windows).

    Not available on all systems (see os.urandom() for details).
    """

    def random(self):
        """Get the next random number in the range [0.0, 1.0)."""
        return (long(_hexlify(_urandom(7)), 16) >> 3) * RECIP_BPF

    def getrandbits(self, k):
        """getrandbits(k) -> x.  Generates a long int with k random bits."""
        if k <= 0:
            raise ValueError('number of bits must be greater than zero')
        if k != int(k):
            raise TypeError('number of bits should be an integer')
        bytes = (k + 7) // 8                    # bits / 8 and rounded up
        x = long(_hexlify(_urandom(bytes)), 16)
        return x >> (bytes * 8 - k)             # trim excess bits

    def _stub(self, *args, **kwds):
        "Stub method.  Not used for a system random number generator."
        return None
    seed = jumpahead = _stub

    def _notimplemented(self, *args, **kwds):
        "Method should not be called for a system random number generator."
        raise NotImplementedError('System entropy source does not have state.')
    getstate = setstate = _notimplemented

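A quick sketch of SystemRandom in use; it draws OS entropy, so seeding is a no-op and there is no reproducible state to save.

from random import SystemRandom

srng = SystemRandom()
token = srng.getrandbits(64)        # 64 OS-entropy bits, e.g. for a nonce
pick = srng.choice(["a", "b", "c"])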
## -------------------- test program --------------------

def _test_generator(n, func, args):
    import time
    print n, 'times', func.__name__
    total = 0.0
    sqsum = 0.0
    smallest = 1e10
    largest = -1e10
    t0 = time.time()
    for i in range(n):
        x = func(*args)
        total += x
        sqsum = sqsum + x*x
        smallest = min(x, smallest)
        largest = max(x, largest)
    t1 = time.time()
    print round(t1-t0, 3), 'sec,',
    avg = total/n
    stddev = _sqrt(sqsum/n - avg*avg)
    print 'avg %g, stddev %g, min %g, max %g' % \
              (avg, stddev, smallest, largest)


def _test(N=2000):
    _test_generator(N, random, ())
    _test_generator(N, normalvariate, (0.0, 1.0))
    _test_generator(N, lognormvariate, (0.0, 1.0))
    _test_generator(N, vonmisesvariate, (0.0, 1.0))
    _test_generator(N, gammavariate, (0.01, 1.0))
    _test_generator(N, gammavariate, (0.1, 1.0))
    _test_generator(N, gammavariate, (0.1, 2.0))
    _test_generator(N, gammavariate, (0.5, 1.0))
    _test_generator(N, gammavariate, (0.9, 1.0))
    _test_generator(N, gammavariate, (1.0, 1.0))
    _test_generator(N, gammavariate, (2.0, 1.0))
    _test_generator(N, gammavariate, (20.0, 1.0))
    _test_generator(N, gammavariate, (200.0, 1.0))
    _test_generator(N, gauss, (0.0, 1.0))
    _test_generator(N, betavariate, (3.0, 3.0))
    _test_generator(N, triangular, (0.0, 1.0, 1.0/3.0))

# Create one instance, seeded from current time, and export its methods
# as module-level functions.  The functions share state across all uses
# (both in the user's code and in the Python libraries), but that's fine
# for most programs and is easier for the casual user than making them
# instantiate their own Random() instance.

_inst = Random()
seed = _inst.seed
random = _inst.random
uniform = _inst.uniform
triangular = _inst.triangular
randint = _inst.randint
choice = _inst.choice
randrange = _inst.randrange
sample = _inst.sample
shuffle = _inst.shuffle
normalvariate = _inst.normalvariate
lognormvariate = _inst.lognormvariate
expovariate = _inst.expovariate
vonmisesvariate = _inst.vonmisesvariate
gammavariate = _inst.gammavariate
gauss = _inst.gauss
betavariate = _inst.betavariate
paretovariate = _inst.paretovariate
weibullvariate = _inst.weibullvariate
getstate = _inst.getstate
setstate = _inst.setstate
jumpahead = _inst.jumpahead
getrandbits = _inst.getrandbits

if __name__ == '__main__':
    _test()
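Because the module exports the methods of one shared Random() instance, a single seed() call makes a whole run reproducible; a minimal sketch:

import random

random.seed(20120209)
a = [random.random() for _ in range(3)]
random.seed(20120209)
b = [random.random() for _ in range(3)]
assert a == b                       # same seed, same sequence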
BIN
icarus-miner/data/usr/lib/python2.6/lib-dynload/random.pyc
Normal file
Binary file not shown.
BIN
icarus-miner/data/usr/lib/python2.6/lib-dynload/select.so
Executable file
Binary file not shown.
603
icarus-miner/data/usr/lib/python2.6/lib-dynload/tempfile.py
Normal file
@@ -0,0 +1,603 @@
"""Temporary files.

This module provides generic, low- and high-level interfaces for
creating temporary files and directories.  The interfaces listed
as "safe" just below can be used without fear of race conditions.
Those listed as "unsafe" cannot, and are provided for backward
compatibility only.

This module also provides some data items to the user:

  TMP_MAX  - maximum number of names that will be tried before
             giving up.
  template - the default prefix for all temporary names.
             You may change this to control the default prefix.
  tempdir  - If this is set to a string before the first use of
             any routine from this module, it will be considered as
             another candidate location to store temporary files.
"""

__all__ = [
    "NamedTemporaryFile", "TemporaryFile", # high level safe interfaces
    "SpooledTemporaryFile",
    "mkstemp", "mkdtemp",                  # low level safe interfaces
    "mktemp",                              # deprecated unsafe interface
    "TMP_MAX", "gettempprefix",            # constants
    "tempdir", "gettempdir"
    ]


# Imports.

import os as _os
import errno as _errno
from random import Random as _Random

try:
    from cStringIO import StringIO as _StringIO
except ImportError:
    from StringIO import StringIO as _StringIO

try:
    import fcntl as _fcntl
except ImportError:
    def _set_cloexec(fd):
        pass
else:
    def _set_cloexec(fd):
        try:
            flags = _fcntl.fcntl(fd, _fcntl.F_GETFD, 0)
        except IOError:
            pass
        else:
            # flags read successfully, modify
            flags |= _fcntl.FD_CLOEXEC
            _fcntl.fcntl(fd, _fcntl.F_SETFD, flags)


try:
    import thread as _thread
except ImportError:
    import dummy_thread as _thread
_allocate_lock = _thread.allocate_lock

_text_openflags = _os.O_RDWR | _os.O_CREAT | _os.O_EXCL
if hasattr(_os, 'O_NOINHERIT'):
    _text_openflags |= _os.O_NOINHERIT
if hasattr(_os, 'O_NOFOLLOW'):
    _text_openflags |= _os.O_NOFOLLOW

_bin_openflags = _text_openflags
if hasattr(_os, 'O_BINARY'):
    _bin_openflags |= _os.O_BINARY

if hasattr(_os, 'TMP_MAX'):
    TMP_MAX = _os.TMP_MAX
else:
    TMP_MAX = 10000

template = "tmp"

# Internal routines.

_once_lock = _allocate_lock()

if hasattr(_os, "lstat"):
    _stat = _os.lstat
elif hasattr(_os, "stat"):
    _stat = _os.stat
else:
    # Fallback.  All we need is something that raises os.error if the
    # file doesn't exist.
    def _stat(fn):
        try:
            f = open(fn)
        except IOError:
            raise _os.error
        f.close()

def _exists(fn):
    try:
        _stat(fn)
    except _os.error:
        return False
    else:
        return True

class _RandomNameSequence:
    """An instance of _RandomNameSequence generates an endless
    sequence of unpredictable strings which can safely be incorporated
    into file names.  Each string is six characters long.  Multiple
    threads can safely use the same instance at the same time.

    _RandomNameSequence is an iterator."""

    characters = ("abcdefghijklmnopqrstuvwxyz" +
                  "ABCDEFGHIJKLMNOPQRSTUVWXYZ" +
                  "0123456789_")

    def __init__(self):
        self.mutex = _allocate_lock()
        self.rng = _Random()
        self.normcase = _os.path.normcase

    def __iter__(self):
        return self

    def next(self):
        m = self.mutex
        c = self.characters
        choose = self.rng.choice

        m.acquire()
        try:
            letters = [choose(c) for dummy in "123456"]
        finally:
            m.release()

        return self.normcase(''.join(letters))

def _candidate_tempdir_list():
    """Generate a list of candidate temporary directories which
    _get_default_tempdir will try."""

    dirlist = []

    # First, try the environment.
    for envname in 'TMPDIR', 'TEMP', 'TMP':
        dirname = _os.getenv(envname)
        if dirname: dirlist.append(dirname)

    # Failing that, try OS-specific locations.
    if _os.name == 'riscos':
        dirname = _os.getenv('Wimp$ScrapDir')
        if dirname: dirlist.append(dirname)
    elif _os.name == 'nt':
        dirlist.extend([ r'c:\temp', r'c:\tmp', r'\temp', r'\tmp' ])
    else:
        dirlist.extend([ '/tmp', '/var/tmp', '/usr/tmp' ])

    # As a last resort, the current directory.
    try:
        dirlist.append(_os.getcwd())
    except (AttributeError, _os.error):
        dirlist.append(_os.curdir)

    return dirlist

def _get_default_tempdir():
    """Calculate the default directory to use for temporary files.
    This routine should be called exactly once.

    We determine whether or not a candidate temp dir is usable by
    trying to create and write to a file in that directory.  If this
    is successful, the test file is deleted.  To prevent denial of
    service, the name of the test file must be randomized."""

    namer = _RandomNameSequence()
    dirlist = _candidate_tempdir_list()
    flags = _text_openflags

    for dir in dirlist:
        if dir != _os.curdir:
            dir = _os.path.normcase(_os.path.abspath(dir))
        # Try only a few names per directory.
        for seq in xrange(100):
            name = namer.next()
            filename = _os.path.join(dir, name)
            try:
                fd = _os.open(filename, flags, 0600)
                fp = _os.fdopen(fd, 'w')
                fp.write('blat')
                fp.close()
                _os.unlink(filename)
                del fp, fd
                return dir
            except (OSError, IOError), e:
                if e[0] != _errno.EEXIST:
                    break # no point trying more names in this directory
                pass
    raise IOError, (_errno.ENOENT,
                    ("No usable temporary directory found in %s" % dirlist))

_name_sequence = None

def _get_candidate_names():
    """Common setup sequence for all user-callable interfaces."""

    global _name_sequence
    if _name_sequence is None:
        _once_lock.acquire()
        try:
            if _name_sequence is None:
                _name_sequence = _RandomNameSequence()
        finally:
            _once_lock.release()
    return _name_sequence


def _mkstemp_inner(dir, pre, suf, flags):
    """Code common to mkstemp, TemporaryFile, and NamedTemporaryFile."""

    names = _get_candidate_names()

    for seq in xrange(TMP_MAX):
        name = names.next()
        file = _os.path.join(dir, pre + name + suf)
        try:
            fd = _os.open(file, flags, 0600)
            _set_cloexec(fd)
            return (fd, _os.path.abspath(file))
        except OSError, e:
            if e.errno == _errno.EEXIST:
                continue # try again
            raise

    raise IOError, (_errno.EEXIST, "No usable temporary file name found")


# User visible interfaces.

def gettempprefix():
    """Accessor for tempdir.template."""
    return template

tempdir = None

def gettempdir():
    """Accessor for tempfile.tempdir."""
    global tempdir
    if tempdir is None:
        _once_lock.acquire()
        try:
            if tempdir is None:
                tempdir = _get_default_tempdir()
        finally:
            _once_lock.release()
    return tempdir

def mkstemp(suffix="", prefix=template, dir=None, text=False):
    """User-callable function to create and return a unique temporary
    file.  The return value is a pair (fd, name) where fd is the
    file descriptor returned by os.open, and name is the filename.

    If 'suffix' is specified, the file name will end with that suffix,
    otherwise there will be no suffix.

    If 'prefix' is specified, the file name will begin with that prefix,
    otherwise a default prefix is used.

    If 'dir' is specified, the file will be created in that directory,
    otherwise a default directory is used.

    If 'text' is specified and true, the file is opened in text
    mode.  Else (the default) the file is opened in binary mode.  On
    some operating systems, this makes no difference.

    The file is readable and writable only by the creating user ID.
    If the operating system uses permission bits to indicate whether a
    file is executable, the file is executable by no one.  The file
    descriptor is not inherited by children of this process.

    Caller is responsible for deleting the file when done with it.
    """

    if dir is None:
        dir = gettempdir()

    if text:
        flags = _text_openflags
    else:
        flags = _bin_openflags

    return _mkstemp_inner(dir, prefix, suffix, flags)

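A minimal sketch of the mkstemp() contract described above; mkstemp() returns a raw OS file descriptor plus the path, and the caller owns the cleanup, unlike the higher-level wrappers. The prefix and data below are illustrative.

import os
import tempfile

fd, path = tempfile.mkstemp(suffix=".log", prefix="icarus_")
try:
    os.write(fd, "scratch data\n")
finally:
    os.close(fd)
    os.remove(path)     # mkstemp never deletes for you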
def mkdtemp(suffix="", prefix=template, dir=None):
    """User-callable function to create and return a unique temporary
    directory.  The return value is the pathname of the directory.

    Arguments are as for mkstemp, except that the 'text' argument is
    not accepted.

    The directory is readable, writable, and searchable only by the
    creating user.

    Caller is responsible for deleting the directory when done with it.
    """

    if dir is None:
        dir = gettempdir()

    names = _get_candidate_names()

    for seq in xrange(TMP_MAX):
        name = names.next()
        file = _os.path.join(dir, prefix + name + suffix)
        try:
            _os.mkdir(file, 0700)
            return file
        except OSError, e:
            if e.errno == _errno.EEXIST:
                continue # try again
            raise

    raise IOError, (_errno.EEXIST, "No usable temporary directory name found")

def mktemp(suffix="", prefix=template, dir=None):
    """User-callable function to return a unique temporary file name.  The
    file is not created.

    Arguments are as for mkstemp, except that the 'text' argument is
    not accepted.

    This function is unsafe and should not be used.  The file name
    refers to a file that did not exist at some point, but by the time
    you get around to creating it, someone else may have beaten you to
    the punch.
    """

##    from warnings import warn as _warn
##    _warn("mktemp is a potential security risk to your program",
##          RuntimeWarning, stacklevel=2)

    if dir is None:
        dir = gettempdir()

    names = _get_candidate_names()
    for seq in xrange(TMP_MAX):
        name = names.next()
        file = _os.path.join(dir, prefix + name + suffix)
        if not _exists(file):
            return file

    raise IOError, (_errno.EEXIST, "No usable temporary filename found")


class _TemporaryFileWrapper:
    """Temporary file wrapper

    This class provides a wrapper around files opened for
    temporary use.  In particular, it seeks to automatically
    remove the file when it is no longer needed.
    """

    def __init__(self, file, name, delete=True):
        self.file = file
        self.name = name
        self.close_called = False
        self.delete = delete

    def __getattr__(self, name):
        # Attribute lookups are delegated to the underlying file
        # and cached for non-numeric results
        # (i.e. methods are cached, closed and friends are not)
        file = self.__dict__['file']
        a = getattr(file, name)
        if not issubclass(type(a), type(0)):
            setattr(self, name, a)
        return a

    # The underlying __enter__ method returns the wrong object
    # (self.file) so override it to return the wrapper
    def __enter__(self):
        self.file.__enter__()
        return self

    # NT provides delete-on-close as a primitive, so we don't need
    # the wrapper to do anything special.  We still use it so that
    # file.name is useful (i.e. not "(fdopen)") with NamedTemporaryFile.
    if _os.name != 'nt':
        # Cache the unlinker so we don't get spurious errors at
        # shutdown when the module-level "os" is None'd out.  Note
        # that this must be referenced as self.unlink, because the
        # name TemporaryFileWrapper may also get None'd out before
        # __del__ is called.
        unlink = _os.unlink

        def close(self):
            if not self.close_called:
                self.close_called = True
                self.file.close()
                if self.delete:
                    self.unlink(self.name)

        def __del__(self):
            self.close()

        # Need to trap __exit__ as well to ensure the file gets
        # deleted when used in a with statement
        def __exit__(self, exc, value, tb):
            result = self.file.__exit__(exc, value, tb)
            self.close()
            return result


def NamedTemporaryFile(mode='w+b', bufsize=-1, suffix="",
                       prefix=template, dir=None, delete=True):
    """Create and return a temporary file.
    Arguments:
    'prefix', 'suffix', 'dir' -- as for mkstemp.
    'mode' -- the mode argument to os.fdopen (default "w+b").
    'bufsize' -- the buffer size argument to os.fdopen (default -1).
    'delete' -- whether the file is deleted on close (default True).
    The file is created as mkstemp() would do it.

    Returns an object with a file-like interface; the name of the file
    is accessible as file.name.  The file will be automatically deleted
    when it is closed unless the 'delete' argument is set to False.
    """

    if dir is None:
        dir = gettempdir()

    if 'b' in mode:
        flags = _bin_openflags
    else:
        flags = _text_openflags

    # Setting O_TEMPORARY in the flags causes the OS to delete
    # the file when it is closed.  This is only supported by Windows.
    if _os.name == 'nt' and delete:
        flags |= _os.O_TEMPORARY

    (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags)
    file = _os.fdopen(fd, mode, bufsize)
    return _TemporaryFileWrapper(file, name, delete)

if _os.name != 'posix' or _os.sys.platform == 'cygwin':
    # On non-POSIX and Cygwin systems, assume that we cannot unlink a file
    # while it is open.
    TemporaryFile = NamedTemporaryFile

else:
    def TemporaryFile(mode='w+b', bufsize=-1, suffix="",
                      prefix=template, dir=None):
        """Create and return a temporary file.
        Arguments:
        'prefix', 'suffix', 'dir' -- as for mkstemp.
        'mode' -- the mode argument to os.fdopen (default "w+b").
        'bufsize' -- the buffer size argument to os.fdopen (default -1).
        The file is created as mkstemp() would do it.

        Returns an object with a file-like interface.  The file has no
        name, and will cease to exist when it is closed.
        """

        if dir is None:
            dir = gettempdir()

        if 'b' in mode:
            flags = _bin_openflags
        else:
            flags = _text_openflags

        (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags)
        try:
            _os.unlink(name)
            return _os.fdopen(fd, mode, bufsize)
        except:
            _os.close(fd)
            raise

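A sketch of the high-level wrapper in use; the with statement relies on the __enter__/__exit__ methods defined above, and the file is removed when the block exits.

import tempfile

with tempfile.NamedTemporaryFile(suffix=".txt") as tmp:
    tmp.write("gone when the block exits\n")
    print tmp.name              # real path, usable while the file is open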
class SpooledTemporaryFile:
    """Temporary file wrapper, specialized to switch from
    StringIO to a real file when it exceeds a certain size or
    when a fileno is needed.
    """
    _rolled = False

    def __init__(self, max_size=0, mode='w+b', bufsize=-1,
                 suffix="", prefix=template, dir=None):
        self._file = _StringIO()
        self._max_size = max_size
        self._rolled = False
        self._TemporaryFileArgs = (mode, bufsize, suffix, prefix, dir)

    def _check(self, file):
        if self._rolled: return
        max_size = self._max_size
        if max_size and file.tell() > max_size:
            self.rollover()

    def rollover(self):
        if self._rolled: return
        file = self._file
        newfile = self._file = TemporaryFile(*self._TemporaryFileArgs)
        del self._TemporaryFileArgs

        newfile.write(file.getvalue())
        newfile.seek(file.tell(), 0)

        self._rolled = True

    # The method caching trick from NamedTemporaryFile
    # won't work here, because _file may change from a
    # _StringIO instance to a real file.  So we list
    # all the methods directly.

    # Context management protocol
    def __enter__(self):
        if self._file.closed:
            raise ValueError("Cannot enter context with closed file")
        return self

    def __exit__(self, exc, value, tb):
        self._file.close()

    # file protocol
    def __iter__(self):
        return self._file.__iter__()

    def close(self):
        self._file.close()

    @property
    def closed(self):
        return self._file.closed

    @property
    def encoding(self):
        return self._file.encoding

    def fileno(self):
        self.rollover()
        return self._file.fileno()

    def flush(self):
        self._file.flush()

    def isatty(self):
        return self._file.isatty()

    @property
    def mode(self):
        return self._file.mode

    @property
    def name(self):
        return self._file.name

    @property
    def newlines(self):
        return self._file.newlines

    def next(self):
        return self._file.next

    def read(self, *args):
        return self._file.read(*args)

    def readline(self, *args):
        return self._file.readline(*args)

    def readlines(self, *args):
        return self._file.readlines(*args)

    def seek(self, *args):
        self._file.seek(*args)

    @property
    def softspace(self):
        return self._file.softspace

    def tell(self):
        return self._file.tell()

    def truncate(self):
        self._file.truncate()

    def write(self, s):
        file = self._file
        rv = file.write(s)
        self._check(file)
        return rv

    def writelines(self, iterable):
        file = self._file
        rv = file.writelines(iterable)
        self._check(file)
        return rv

    def xreadlines(self, *args):
        return self._file.xreadlines(*args)
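A sketch of the spooling behaviour: writes stay in an in-memory StringIO until max_size is crossed, then roll over to a real TemporaryFile. The _rolled flag is internal state defined above, read here only to illustrate the switch.

import tempfile

spool = tempfile.SpooledTemporaryFile(max_size=1024)
spool.write("x" * 100)          # still an in-memory StringIO
assert not spool._rolled
spool.write("x" * 2000)         # crosses max_size: spills to disk
assert spool._rolled
spool.close()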
BIN
icarus-miner/data/usr/lib/python2.6/lib-dynload/tempfile.pyc
Normal file
Binary file not shown.
Some files were not shown because too many files have changed in this diff.