import numpy as np
import math
import random
import time

start = time.time()
for i in range(10):
    list_1 = list(range(1, 10000))
    for j in range(len(list_1)):
        list_1[j] = math.sin(list_1[j])
print("Pure Python took {}s".format(time.time() - start))

start = time.time()
for i in range(10):
    list_1 = np.array(np.arange(1, 10000))
    list_1 = np.sin(list_1)
print("NumPy took {}s".format(time.time() - start))
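For steadier numbers than a single time.time() delta, the standard library's timeit module repeats a statement and is less sensitive to clock noise. A minimal sketch of the NumPy half of the comparison (the statement string and number=10 are our own choices, not part of the original benchmark):

import timeit

# Repeat the vectorized computation 10 times, matching the loop count above.
numpy_time = timeit.timeit("np.sin(np.arange(1, 10000))",
                           setup="import numpy as np",
                           number=10)
print("NumPy via timeit: {}s".format(numpy_time))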
OpenCV is a cross-platform computer vision library that runs on Linux, Windows, and Mac OS. It is lightweight and efficient: built from a set of C functions and a small number of C++ classes, it also provides a Python interface and implements many common algorithms in image processing and computer vision. The following code tries a few simple filters, including image smoothing and Gaussian blur:
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt

img = cv.imread('h89817032p0.png')
# Average (box) filter: convolve with a normalized 5x5 kernel
kernel = np.ones((5, 5), np.float32) / 25
dst = cv.filter2D(img, -1, kernel)
blur_1 = cv.GaussianBlur(img, (5, 5), 0)
blur_2 = cv.bilateralFilter(img, 9, 75, 75)

# img[:, :, ::-1] converts BGR (OpenCV) to RGB (matplotlib)
plt.figure(figsize=(10, 10))
plt.subplot(221), plt.imshow(img[:, :, ::-1]), plt.title('Original')
plt.xticks([]), plt.yticks([])
plt.subplot(222), plt.imshow(dst[:, :, ::-1]), plt.title('Averaging')
plt.xticks([]), plt.yticks([])
plt.subplot(223), plt.imshow(blur_1[:, :, ::-1]), plt.title('Gaussian')
plt.xticks([]), plt.yticks([])
plt.subplot(224), plt.imshow(blur_2[:, :, ::-1]), plt.title('Bilateral')
plt.xticks([]), plt.yticks([])
plt.show()
from SimpleCV import Image, Color, Display

# load an image from imgur
img = Image('http://i.imgur.com/lfAeZ4n.png')
# use a keypoint detector to find areas of interest
feats = img.findKeypoints()
# draw the list of keypoints
feats.draw(color=Color.RED)
# show the resulting image.
img.show()
# apply the stuff we found to the image.
output = img.applyLayers()
# save the results.
output.save('juniperfeats.png')
Under Python 3 it fails with the following error, so SimpleCV is not recommended for Python 3:
SyntaxError: Missing parentheses in call to 'print'. Did you mean print('unit test')?
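The error comes from SimpleCV's Python 2 print statements. For a Python 3 workflow, a rough sketch of the same keypoint idea using OpenCV (introduced earlier) follows; the ORB detector and the file names are our own substitutions, not a SimpleCV equivalent:

import cv2 as cv

# Hypothetical input file; replace with your own image.
img = cv.imread('juniper.png')
# Detect keypoints with ORB and draw them in red (BGR order).
orb = cv.ORB_create()
keypoints = orb.detect(img, None)
out = cv.drawKeypoints(img, keypoints, None, color=(0, 0, 255))
cv.imwrite('juniperfeats.png', out)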
import numpy as np
import mahotas
import mahotas.demos
from mahotas.thresholding import soft_threshold
from matplotlib import pyplot as plt
from os import path

f = mahotas.demos.load('lena', as_grey=True)
f = f[128:, 128:]
plt.gray()
# Show the data:
print("Fraction of zeros in original image: {0}".format(np.mean(f == 0)))
plt.imshow(f)
plt.show()
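The soft_threshold import above is otherwise unused; a short follow-up applies it to the same image (the threshold value 64 is an arbitrary choice for illustration):

# Shrink values toward zero by 64, zeroing out anything smaller.
f_thresh = soft_threshold(f, 64)
print("Fraction of zeros after soft threshold: {0}".format(np.mean(f_thresh == 0)))
plt.imshow(f_thresh)
plt.show()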
from scipy import special
import matplotlib.pyplot as plt
import numpy as np

def drumhead_height(n, k, distance, angle, t):
    kth_zero = special.jn_zeros(n, k)[-1]
    return np.cos(t) * np.cos(n * angle) * special.jn(n, distance * kth_zero)

theta = np.r_[0:2*np.pi:50j]
radius = np.r_[0:1:50j]
x = np.array([r * np.cos(theta) for r in radius])
y = np.array([r * np.sin(theta) for r in radius])
z = np.array([drumhead_height(1, 1, r, theta, 0.5) for r in radius])

fig = plt.figure()
ax = fig.add_axes(rect=(0, 0.05, 0.95, 0.95), projection='3d')
ax.plot_surface(x, y, z, rstride=1, cstride=1, cmap='RdBu_r', vmin=-0.5, vmax=0.5)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_xticks(np.arange(-1, 1.1, 0.5))
ax.set_yticks(np.arange(-1, 1.1, 0.5))
ax.set_zlabel('Z')
plt.show()
11、NLTK
NLTK is a library for building Python programs that work with natural language. It provides easy-to-use interfaces to more than 50 corpora and lexical resources such as WordNet, together with a suite of text-processing libraries for classification, tokenization, stemming, tagging, parsing, and semantic reasoning, as well as wrappers for industrial-strength Natural Language Processing (NLP) libraries. NLTK has been called "a wonderful tool for teaching, and working in, computational linguistics using Python".
import nltk
from nltk.corpus import treebank

# Required downloads on first use
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
nltk.download('maxent_ne_chunker')
nltk.download('words')
nltk.download('treebank')

sentence = """At eight o'clock on Thursday morning Arthur didn't feel very good."""
# Tokenize
tokens = nltk.word_tokenize(sentence)
# Part-of-speech tag
tagged = nltk.pos_tag(tokens)

# Identify named entities
entities = nltk.chunk.ne_chunk(tagged)

# Display a parse tree
t = treebank.parsed_sents('wsj_0001.mrg')[0]
t.draw()
import spacy

texts = [
    "Net income was $9.4 million compared to the prior year of $2.7 million.",
    "Revenue exceeded twelve billion dollars, with a loss of $1b.",
]

nlp = spacy.load("en_core_web_sm")
for doc in nlp.pipe(texts, disable=["tok2vec", "tagger", "parser", "attribute_ruler", "lemmatizer"]):
    # Do something with the doc here
    print([(ent.text, ent.label_) for ent in doc.ents])

nlp.pipe yields Doc objects, so we can iterate over them and access the named-entity predictions:

[('$9.4 million', 'MONEY'), ('the prior year', 'DATE'), ('$2.7 million', 'MONEY')]
[('twelve billion dollars', 'MONEY'), ('1b', 'MONEY')]
# Beat tracking example
import librosa

# 1. Get the file path to an included audio example
filename = librosa.example('nutcracker')

# 2. Load the audio as a waveform `y`
#    Store the sampling rate as `sr`
y, sr = librosa.load(filename)

# 3. Run the default beat tracker
tempo, beat_frames = librosa.beat.beat_track(y=y, sr=sr)
print('Estimated tempo: {:.2f} beats per minute'.format(tempo))

# 4. Convert the frame indices of beat events into timestamps
beat_times = librosa.frames_to_time(beat_frames, sr=sr)
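One way to sanity-check the tracker is to render the detected beats as an audible click track. A sketch, assuming the soundfile package is installed (the output file name is our own choice):

import soundfile as sf

# Synthesize clicks at the detected beat times, trimmed to the signal length.
clicks = librosa.clicks(times=beat_times, sr=sr, length=len(y))
# Mix the clicks over the original waveform and write the result to disk.
sf.write('nutcracker_beats.wav', y + clicks, sr)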
import numpy as np
import milk

# 100 samples of 10 features; the second half is shifted and labeled 1
features = np.random.rand(100, 10)
labels = np.zeros(100)
features[50:] += .5
labels[50:] = 1
learner = milk.defaultclassifier()
model = learner.train(features, labels)

# Now you can use the model on new examples:
example = np.random.rand(10)
print(model.apply(example))
example2 = np.random.rand(10)
example2 += .5
print(model.apply(example2))
import mxnet as mx
from mxnet import gluon
from mxnet.gluon import nn
from mxnet import autograd as ag
import mxnet.ndarray as F

# Load the data
mnist = mx.test_utils.get_mnist()
batch_size = 100
train_data = mx.io.NDArrayIter(mnist['train_data'], mnist['train_label'], batch_size, shuffle=True)
val_data = mx.io.NDArrayIter(mnist['test_data'], mnist['test_label'], batch_size)

# CNN model
class Net(gluon.Block):
    def __init__(self, **kwargs):
        super(Net, self).__init__(**kwargs)
        self.conv1 = nn.Conv2D(20, kernel_size=(5, 5))
        self.pool1 = nn.MaxPool2D(pool_size=(2, 2), strides=(2, 2))
        self.conv2 = nn.Conv2D(50, kernel_size=(5, 5))
        self.pool2 = nn.MaxPool2D(pool_size=(2, 2), strides=(2, 2))
        self.fc1 = nn.Dense(500)
        self.fc2 = nn.Dense(10)

    def forward(self, x):
        x = self.pool1(F.tanh(self.conv1(x)))
        x = self.pool2(F.tanh(self.conv2(x)))
        # 0 means copy over size from corresponding dimension.
        # -1 means infer size from the rest of dimensions.
        x = x.reshape((0, -1))
        x = F.tanh(self.fc1(x))
        x = F.tanh(self.fc2(x))
        return x

net = Net()

# Initialization and optimizer definition
# Set the context to GPU if available, otherwise CPU
ctx = [mx.gpu() if mx.test_utils.list_gpus() else mx.cpu()]
net.initialize(mx.init.Xavier(magnitude=2.24), ctx=ctx)
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.03})

# Model training
# Use Accuracy as the evaluation metric.
metric = mx.metric.Accuracy()
softmax_cross_entropy_loss = gluon.loss.SoftmaxCrossEntropyLoss()

epoch = 10  # arbitrary value; the original snippet left epoch undefined
for i in range(epoch):
    # Reset the train data iterator.
    train_data.reset()
    for batch in train_data:
        data = gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
        label = gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
        outputs = []
        # Inside training scope
        with ag.record():
            for x, y in zip(data, label):
                z = net(x)
                # Computes softmax cross entropy loss.
                loss = softmax_cross_entropy_loss(z, y)
                # Backpropagate the error for one iteration.
                loss.backward()
                outputs.append(z)
        metric.update(label, outputs)
        trainer.step(batch.data[0].shape[0])
    # Gets the evaluation result.
    name, acc = metric.get()
    # Reset evaluation result to initial state.
    metric.reset()
    print('training acc at epoch %d: %s=%f' % (i, name, acc))
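The snippet defines val_data but never uses it; a hedged addition in the same style evaluates the trained network on the validation iterator after training:

# Evaluate the trained model on the validation set.
val_metric = mx.metric.Accuracy()
val_data.reset()
for batch in val_data:
    data = gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
    label = gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
    outputs = [net(x) for x in data]
    val_metric.update(label, outputs)
name, acc = val_metric.get()
print('validation acc: %s=%f' % (name, acc))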