import math
import time

import numpy as np

# Pure Python: loop over a list and apply math.sin element by element.
start = time.time()
for i in range(10):
    list_1 = list(range(1, 10000))
    for j in range(len(list_1)):
        list_1[j] = math.sin(list_1[j])
print("Pure Python took {}s".format(time.time() - start))

# NumPy: the same computation as a single vectorized call.
start = time.time()
for i in range(10):
    list_1 = np.arange(1, 10000)
    list_1 = np.sin(list_1)
print("NumPy took {}s".format(time.time() - start))
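On a typical machine the NumPy version is one to two orders of magnitude faster: np.sin runs over the whole array in a single vectorized loop in compiled C code, while the pure-Python version pays the interpreter's dispatch cost for every element.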
OpenCV is a cross-platform computer vision library that runs on Linux, Windows, and Mac OS. It is lightweight and efficient: it consists of a set of C functions and a small number of C++ classes, also provides a Python interface, and implements many general-purpose algorithms for image processing and computer vision. The code below tries a few simple filters, such as image smoothing and Gaussian blur.
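A minimal sketch of these filters with OpenCV's cv2 bindings (the input file test.png and the 5x5 kernel size are illustrative assumptions):

import cv2

# Read an image from disk (the path is an illustrative assumption).
img = cv2.imread('test.png')

# Mean smoothing: average each pixel over a 5x5 neighborhood.
smoothed = cv2.blur(img, (5, 5))

# Gaussian blur: weight the 5x5 neighborhood with a Gaussian kernel;
# sigma 0 tells OpenCV to derive it from the kernel size.
blurred = cv2.GaussianBlur(img, (5, 5), 0)

cv2.imwrite('test_blur.png', blurred)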
from SimpleCV import Image, Color, Display

# load an image from imgur
img = Image('http://i.imgur.com/lfAeZ4n.png')
# use a keypoint detector to find areas of interest
feats = img.findKeypoints()
# draw the list of keypoints
feats.draw(color=Color.RED)
# show the resulting image.
img.show()
# apply the stuff we found to the image.
output = img.applyLayers()
# save the results.
output.save('juniperfeats.png')
Running the SimpleCV example above under Python 3 reports the following error (the library still uses Python 2 print statements), so it is not recommended for use with Python 3:
SyntaxError: Missing parentheses in call to 'print'. Did you mean print('unit test')?
import numpy as np
import mahotas
import mahotas.demos
from mahotas.thresholding import soft_threshold
from matplotlib import pyplot as plt
from os import path

f = mahotas.demos.load('lena', as_grey=True)
f = f[128:, 128:]
plt.gray()
# Show the data:
print("Fraction of zeros in original image: {0}".format(np.mean(f == 0)))
plt.imshow(f)
plt.show()
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.datasets import make_blobs

# Generate sample data and fit both estimators. This setup is elided in the
# original excerpt; the parameters here are illustrative.
X, _ = make_blobs(n_samples=3000, centers=3, cluster_std=0.7, random_state=0)
k_means = KMeans(n_clusters=3, n_init=10).fit(X)
mbk = MiniBatchKMeans(n_clusters=3, batch_size=100, n_init=10).fit(X)

# We want to have the same colors for the same cluster from the
# MiniBatchKMeans and the KMeans algorithm. Let's pair the cluster centers per
# closest one.
k_means_cluster_centers = k_means.cluster_centers_
order = pairwise_distances_argmin(k_means.cluster_centers_, mbk.cluster_centers_)
mbk_means_cluster_centers = mbk.cluster_centers_[order]
import numpy as np
from scipy import special

# drumhead_height is not defined in this excerpt; this is the definition
# from the scipy.special documentation example it is taken from.
def drumhead_height(n, k, distance, angle, t):
    kth_zero = special.jn_zeros(n, k)[k - 1]
    return np.cos(t) * np.cos(n * angle) * special.jn(n, distance * kth_zero)

theta = np.r_[0:2 * np.pi:50j]
radius = np.r_[0:1:50j]
x = np.array([r * np.cos(theta) for r in radius])
y = np.array([r * np.sin(theta) for r in radius])
z = np.array([drumhead_height(1, 1, r, theta, 0.5) for r in radius])
NLTK is a library for building Python programs that work with natural language. It provides easy-to-use interfaces to over 50 corpora and lexical resources (such as WordNet), along with a suite of text-processing libraries for classification, tokenization, stemming, tagging, parsing, and semantic reasoning, as well as wrappers for industrial-strength Natural Language Processing (NLP) libraries. NLTK has been called "a wonderful tool for teaching, and working in, computational linguistics using Python".
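As a minimal sketch of that toolchain (the sample sentence is invented; the punkt tokenizer and perceptron tagger models are downloaded once and cached), tokenization and part-of-speech tagging with NLTK look like this:

import nltk

# One-time model downloads (no-ops once cached).
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')

sentence = "NLTK makes it easy to tokenize and tag English text."
tokens = nltk.word_tokenize(sentence)  # split the sentence into word tokens
tagged = nltk.pos_tag(tokens)          # attach a part-of-speech tag to each token
print(tagged[:5])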
import spacy

texts = [
    "Net income was $9.4 million compared to the prior year of $2.7 million.",
    "Revenue exceeded twelve billion dollars, with a loss of $1b.",
]

nlp = spacy.load("en_core_web_sm")
for doc in nlp.pipe(texts, disable=["tok2vec", "tagger", "parser", "attribute_ruler", "lemmatizer"]):
    # Do something with the doc here
    print([(ent.text, ent.label_) for ent in doc.ents])
import numpy as np
import milk

# 100 random examples with 10 features each; the second half is shifted
# and labelled 1 so the two classes are separable.
features = np.random.rand(100, 10)
labels = np.zeros(100)
features[50:] += .5
labels[50:] = 1

learner = milk.defaultclassifier()
model = learner.train(features, labels)

# Now you can use the model on new examples:
example = np.random.rand(10)
print(model.apply(example))
example2 = np.random.rand(10)
example2 += .5
print(model.apply(example2))
# Import libraries
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import ToTensor, Lambda, Compose
import matplotlib.pyplot as plt
import theano
import theano.tensor as T

x = T.dvector('x')
y = x ** 2
# Build the Jacobian of y with respect to x row by row with scan.
J, updates = theano.scan(lambda i, y, x: T.grad(y[i], x),
                         sequences=T.arange(y.shape[0]),
                         non_sequences=[y, x])
f = theano.function([x], J, updates=updates)
f([4, 4])
# Imports assumed by this excerpt:
import mxnet as mx
import mxnet.ndarray as F
from mxnet import gluon, autograd as ag

# This excerpt assumes Net is a gluon.Block subclass; its __init__, which
# defines conv1/pool1/conv2/pool2/fc1/fc2, is omitted here.
class Net(gluon.Block):
    def forward(self, x):
        x = self.pool1(F.tanh(self.conv1(x)))
        x = self.pool2(F.tanh(self.conv2(x)))
        # 0 means copy over size from corresponding dimension.
        # -1 means infer size from the rest of dimensions.
        x = x.reshape((0, -1))
        x = F.tanh(self.fc1(x))
        x = F.tanh(self.fc2(x))
        return x

net = Net()

# Initialization and optimizer definition
# Set the context to GPU if available, otherwise CPU.
ctx = [mx.gpu() if mx.test_utils.list_gpus() else mx.cpu()]
net.initialize(mx.init.Xavier(magnitude=2.24), ctx=ctx)
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.03})
# Model training
# Use Accuracy as the evaluation metric. epoch (number of passes) and
# train_data (an mx.io data iterator) are defined earlier and omitted here.
metric = mx.metric.Accuracy()
softmax_cross_entropy_loss = gluon.loss.SoftmaxCrossEntropyLoss()

for i in range(epoch):
    # Reset the train data iterator.
    train_data.reset()
    for batch in train_data:
        data = gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
        label = gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
        outputs = []
        # Inside training scope
        with ag.record():
            for x, y in zip(data, label):
                z = net(x)
                # Computes softmax cross entropy loss.
                loss = softmax_cross_entropy_loss(z, y)
                # Backpropagate the error for one iteration.
                loss.backward()
                outputs.append(z)
        metric.update(label, outputs)
        trainer.step(batch.data[0].shape[0])
    # Gets the evaluation result.
    name, acc = metric.get()
    # Reset evaluation result to initial state.
    metric.reset()
    print('training acc at epoch %d: %s=%f' % (i, name, acc))