These are my Python-related notes.
Common commands
quit()	stop and exit the interpreter
def	define a function
max()	get the maximum value
min()	get the minimum value
type()	get the type of an object
float()	convert to a floating-point number
int()	convert to an integer
return	return a value from a function
break	break out of the enclosing loop
continue	skip to the next iteration of the loop
sorted()	return a sorted list
dir()	list the attributes of an object
try/except	catch errors
word = 'Hello world!'
try:
    word1 = int(word)    # fails: 'Hello world!' is not a number
except ValueError:
    word1 = -1           # fall back to -1 when the conversion fails
print('word', word1)
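
A small sketch of break and continue in a for loop:

for n in range(1, 8):
    if n == 6:
        break        # leave the loop entirely
    if n % 2 == 0:
        continue     # skip the rest of this iteration
    print(n)         # prints 1, 3, 5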
Files
open(file, mode='r')	opens a file and returns a file object.
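
A minimal sketch of reading a file line by line (mbox-short.txt is an assumed example file):

fhand = open('mbox-short.txt', mode='r')  # assumed example file
for line in fhand:
    print(line.rstrip())                  # drop the trailing newline
fhand.close()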
Lists
[ ]	enclosed in square brackets
list()	create a list
list.append(obj)	append a new object to the end of the list
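
A quick sketch combining lists with a few of the commands above:

friends = list()
friends.append('Joseph')    # add to the end of the list
friends.append('Glenn')
friends.append('Sally')
print(max(friends))         # 'Sally' (largest in string order)
print(sorted(friends))      # ['Glenn', 'Joseph', 'Sally']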
Dictionaries
{}	enclosed in curly braces
dict()	create a dictionary
dict.get(key, default=None)	return the value for key; if key is not in the dictionary, return the given default
dict.keys()	return a view object of the keys
dict.values()	return a view object of the values
dict.items()	return a view object of (key, value) pairs
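
A short sketch of the word-count pattern, where dict.get supplies 0 for words not seen before (the sample sentence is made up):

counts = dict()
line = 'the clown ran after the car and the car ran into the tent'
for word in line.split():
    counts[word] = counts.get(word, 0) + 1  # default to 0 if the key is missing
print(counts.keys())    # view of the distinct words
print(counts.items())   # view of (word, count) pairs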
Tuples (immutable)
()	enclosed in parentheses
Tuples have only two methods:
tuple.count()	count how many times a value occurs
tuple.index()	return the index of the first occurrence of a value
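
For example:

t = (1, 2, 2, 3)
print(t.count(2))   # 2, the number of times 2 occurs
print(t.index(3))   # 3, the position of the first 3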
Regular expressions
import re	import the regular expression library
'@([^ ]*)'	the part inside ( ) is the data to extract and return; the @ itself is matched but not returned
^ anchors a match at the start of the string; inside square brackets, [^...] means any character except those listed
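
A minimal sketch of that pattern with re.findall (the sample line is hypothetical):

import re
line = 'From stephen.marquard@uct.ac.za Sat Jan  5 09:14:16 2008'
hosts = re.findall('@([^ ]*)', line)  # capture everything after @ up to a space
print(hosts)                          # ['uct.ac.za']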
Network programming
socket
import socket	import the socket library
Create a TCP socket:
import socket
mysock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # TCP stream socket
mysock.connect(('data.pr4e.org', 80))  # connect to the web server's port 80
Send a request:
import socket
mysock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
mysock.connect(('data.pr4e.org', 80))
cmd = 'GET http://data.pr4e.org/romeo.txt HTTP/1.0\r\n\r\n'.encode()
mysock.send(cmd)                # send the HTTP GET request
while True:
    data = mysock.recv(512)     # receive up to 512 bytes at a time
    if len(data) < 1:           # an empty read means the server is done
        break
    print(data.decode())
mysock.close()
import urllib.request, urllib.parse, urllib.error	import the urllib modules
import urllib.request
import urllib.parse
import urllib.error
fhand = urllib.request.urlopen('http://www.dr-chuck.com/page1.htm')
for line in fhand:
    print(line.decode().strip())  # each line arrives as bytes; decode to str
BeautifulSoup
Installation:
Install with pip install beautifulsoup4 or pip3 install beautifulsoup4.
- Alternatively, download the tar archive and drop the bs4/ directory into almost any Python application (or onto your library path) and start using it immediately.
Import:
from bs4 import BeautifulSoup	import BeautifulSoup
http
import urllib.request, urllib.parse, urllib.error
from bs4 import BeautifulSoup
url = input('Enter - ')
html = urllib.request.urlopen(url).read()
soup = BeautifulSoup(html, 'html.parser')
# Retrieve all of the anchor tags
tags = soup('a')
for tag in tags:
    print(tag.get('href', None))
https
import urllib.request, urllib.parse, urllib.error
from bs4 import BeautifulSoup
import ssl
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
url = input('Enter - ')
html = urllib.request.urlopen(url, context=ctx).read()
soup = BeautifulSoup(html, 'html.parser')
# Retrieve all of the anchor tags
tags = soup('a')
for tag in tags:
    print(tag.get('href', None))
xml
import xml.etree.ElementTree as ET
data = '''<person>
            <name>Chuck</name>
            <phone type="intl">+1 734 303 4456</phone>
            <email hide="yes"/>
          </person>'''
tree = ET.fromstring(data)
print('Name:', tree.find('name').text)
print('Attr: ', tree.find('email').get('hide'))
import xml.etree.ElementTree as ET
data = '''<stuff>
    <users>
        <user x="2">
            <id>001</id>
            <name>Chuck</name>
        </user>
        <user x="7">
            <id>009</id>
            <name>Brent</name>
        </user>
    </users>
</stuff>'''
stuff = ET.fromstring(data)
lst = stuff.findall('users/user')
print('User count:', len(lst))
for item in lst:
    print('Name:', item.find('name').text)
    print('Id:', item.find('id').text)
    print('Attribute:', item.get("x"))
json
import json
data = '''{
  "name" : "Chuck",
  "phone" : {
    "type" : "intl",
    "number" : "+1 734 303 4456"
  },
  "email" : {
    "hide" : "yes"
  }
}'''
info = json.loads(data)
print('Name:', info["name"])
print('Hide:', info["email"]["hide"])
import json
data = '''[
  { "id" : "001",
    "x" : "2",
    "name" : "Chuck"
  },
  { "id" : "009",
    "x" : "7",
    "name" : "Chuck"
  }
]'''
info = json.loads(data)
print('User count:', len(info))
for item in info:
    print('Name', item['name'])
    print('Id', item['id'])
    print('Attribute', item['x'])
API
import urllib.request, urllib.parse, urllib.error
import twurl    # course-provided helper that OAuth-signs the request URL
import json
TWITTER_URL = 'https://api.twitter.com/1.1/friends/list.json'
while True:
    print('')
    acct = input('Enter Twitter Account:')
    if (len(acct) < 1): break
    url = twurl.augment(TWITTER_URL,
                        {'screen_name': acct, 'count': '5'})
    print('Retrieving', url)
    connection = urllib.request.urlopen(url)
    data = connection.read().decode()
    headers = dict(connection.getheaders())
    print('Remaining', headers['x-rate-limit-remaining'])
    js = json.loads(data)
    print(json.dumps(js, indent=4))
    for u in js['users']:
        print(u['screen_name'])
        s = u['status']['text']
        print('  ', s[:50])
Databases
Separate multiple SQL statements with a ';'.
Create a table:
CREATE TABLE Users( 
  name VARCHAR(128), 
  email VARCHAR(128)
)
Drop a table:
DROP TABLE IF EXISTS Artist
INSERT (insert rows):
INSERT INTO Users (name, email) VALUES ('Kristin', 'kf@umich.edu')
DELETE (delete rows):
DELETE FROM Users WHERE email='ted@umich.edu'
UPDATE (update rows):
UPDATE Users SET name='Charles' WHERE email='csev@umich.edu'
SELECT (retrieve records):
SELECT * FROM Users
SELECT * FROM Users WHERE email='csev@umich.edu'
ORDER BY (sort results):
SELECT * FROM Users ORDER BY email
SELECT * FROM Users ORDER BY name DESC
Import
JOIN (join tables):
SELECT Album.title, Artist.name FROM Album JOIN Artist ON Album.artist_id = Artist.id
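
A hedged sketch of running this kind of SQL from Python with the built-in sqlite3 module (the music.sqlite filename is an assumption):

import sqlite3
conn = sqlite3.connect('music.sqlite')   # assumed database file
cur = conn.cursor()
cur.execute('DROP TABLE IF EXISTS Users')
cur.execute('CREATE TABLE Users (name VARCHAR(128), email VARCHAR(128))')
cur.execute("INSERT INTO Users (name, email) VALUES ('Kristin', 'kf@umich.edu')")
conn.commit()                            # make the INSERT permanent
for row in cur.execute('SELECT * FROM Users ORDER BY email'):
    print(row)
conn.close()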