
Pandas-Cookbook

# -*- coding: utf-8 -*-

# ---------------------
# Chapter 1 - Reading from a CSV.ipynb
# ---------------------

import pandas as pd
import matplotlib.pyplot as plt
import numpy as np

"""
    1.1 Reading data from a csv file
"""
broken_df = pd.read_csv('data/bikes.csv')
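
# A quick peek at the broken parse (a sketch, not in the original text): with the
# default comma delimiter, every row lands in a single semicolon-packed column.
broken_df[:3]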

# sep sets the delimiter, encoding the file encoding, index_col the column to use as
# the index, dayfirst the day-first date format, and parse_dates the columns to parse as dates
fixed_df = pd.read_csv('data/bikes.csv', sep=';', encoding='latin1', parse_dates=['Date'], dayfirst=True,
                       index_col='Date')
fixed_df[:3]
type(fixed_df)  # <class 'pandas.core.frame.DataFrame'>

"""
    1.2 Selecting a column
"""
fixed_df['Berri 1']

"""
    1.3 Plotting a column
"""
fixed_df['Berri 1'].plot()
plt.show()

fixed_df.plot(figsize=(15, 10))
plt.show()

"""
    1.4 Putting all that together
"""
df = pd.read_csv('data/bikes.csv', sep=';', encoding='latin1', parse_dates=['Date'], dayfirst=True, index_col='Date')
df['Berri 1'].plot()
plt.show()

# ---------------------
# Chapter 2 - Selecting data & finding the most common complaint type.ipynb
# ---------------------

pd.set_option('display.width', 5000)  # 'display.line_width' was renamed to 'display.width' in later pandas
pd.set_option('display.max_columns', 60)
complaints = pd.read_csv('data/311-service-requests.csv')
complaints.head()
complaints['Complaint Type']
complaints[:3]
complaints['Complaint Type'][:3]
complaints[:3]['Complaint Type']
complaints[['Complaint Type', 'Borough']]
complaints[['Complaint Type', 'Borough']][:10]
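
# The same selections in the explicit modern idiom (a sketch): .loc/.iloc make
# row-vs-column intent clearer than chained [] indexing.
complaints['Complaint Type'].iloc[:3]                       # first three rows of one column
complaints.loc[:, ['Complaint Type', 'Borough']].head(10)   # two columns, first ten rows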
complaints_counts = complaints['Complaint Type'].value_counts()  # count occurrences of each complaint type
complaints_counts[:10]
complaints_counts[:10].plot(kind='bar')
plt.show()

# ---------------------
# Chapter 3 - Which borough has the most noise complaints (or, more selecting data).ipynb
# ---------------------

complaints = pd.read_csv('data/311-service-requests.csv')  # read the CSV file
complaints.head()
complaints[:5]

'''
    3.1 Selecting only noise complaints
'''
noise_complaints = complaints[complaints['Complaint Type'] == 'Noise - Street/Sidewalk']
noise_complaints[:3]
complaints['Complaint Type'] == 'Noise - Street/Sidewalk'  # returns a boolean Series (True/False per row)

is_noise = complaints['Complaint Type'] == 'Noise - Street/Sidewalk'
in_brooklyn = complaints['Borough'] == 'BROOKLYN'
complaints[is_noise & in_brooklyn][:5]

complaints[is_noise & in_brooklyn][['Complaint Type', 'Borough', 'Created Date', 'Descriptor']][:10]
'''
    3.2 A digression about numpy arrays
'''
pf = pd.Series([1, 2, 3])
pf
pf.values
pf.index
nf = np.array([1, 2, 3])
nf
nf != 2
nf[nf != 2]
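
# The same boolean-mask indexing works on a pandas Series, which is exactly what
# the complaint filtering above relies on (a minimal sketch):
mask = pf != 2   # boolean Series: [True, False, True]
pf[mask]         # keeps only the elements where the mask is True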

'''
    3.3 So, which borough has the most noise complaints?
'''
is_noise = complaints['Complaint Type'] == "Noise - Street/Sidewalk"
noise_complaints = complaints[is_noise]
noise_complaints['Borough'].value_counts()

noise_complaint_counts = noise_complaints['Borough'].value_counts()
complaint_counts = complaints['Borough'].value_counts()
noise_complaint_counts / complaint_counts  # under Python 2 this was integer division, hence the float cast below
noise_complaint_counts / complaint_counts.astype(float)
(noise_complaint_counts / complaint_counts.astype(float)).plot(kind='bar')
plt.show()
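
# To read the answer straight off, sort the normalized ratios (a sketch using
# sort_values; the borough with the highest share of noise complaints comes first):
noise_ratio = noise_complaint_counts / complaint_counts.astype(float)
noise_ratio.sort_values(ascending=False)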

# ---------------------
# Chapter 4: Find out on which weekday people bike the most with groupby and aggregate
# ---------------------

bikes = pd.read_csv('data/bikes.csv', sep=';', encoding='latin1', index_col='Date', parse_dates=['Date'],
                    dayfirst=True)
bikes.head()

bikes['Berri 1'].plot()  # plot the daily counts
plt.show()

berri_bikes = bikes[['Berri 1']].copy()  # copy the column out as its own one-column DataFrame
berri_bikes[:5]
berri_bikes.index
berri_bikes.index.day
berri_bikes.index.weekday
berri_bikes.loc[:, 'weekday'] = berri_bikes.index.weekday
berri_bikes[:5]

"""
    4.2 Adding up the cyclists by weekday
"""
"""
    使用DataFrames中的.groupby()方法进行分组,并计算每一组的数量和
"""
weekday_counts = berri_bikes.groupby('weekday').aggregate(sum)
weekday_counts

weekday_counts.index = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
weekday_counts

weekday_counts.plot(kind='bar')
plt.show()

"""
    4.3 Putting it together
"""
"""
    所有代码汇总
"""
bikes = pd.read_csv('data/bikes.csv', sep=';', encoding='latin1', index_col='Date', dayfirst=True,
                    parse_dates=['Date'])
berri_bikes = bikes[['Berri 1']].copy()
berri_bikes.loc[:, 'weekday'] = berri_bikes.index.weekday

weekday_counts = berri_bikes.groupby('weekday').aggregate('sum')
weekday_counts.index = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
weekday_counts.plot(kind='bar')
plt.show()

"""
    分析:
        主要是计算时间,分组处理一周时间,将每周对应的数量加到对应的天上
    方法:
        1、csv数据的读取
        2、列数据的复制
        3、将数据按照一周来进行划分
        4、按照一周进行分组处理数据,修改索引
        5、直方图展示
"""

# ---------------------
# Chapter 5: Combining dataframes and scraping Canadian weather data
# ---------------------

'''
    Summary
'''
weather_2012_final = pd.read_csv('data/weather_2012.csv', index_col='Date/Time')
weather_2012_final.head()
weather_2012_final['Temp (C)'].plot(figsize=(15, 6))
plt.show()

'''
    5.1 Downloading one month of weather data
'''

url_template = "http://climate.weather.gc.ca/climate_data/bulk_data_e.html?format=csv&stationID=31407&Year={year}&Month={month}&timeframe=1&submit=%E4%B8%8B%E8%BD%BD%E6%95%B0%E6%8D%AE"
url = url_template.format(month=3, year=2012)
# skiprows skips the metadata preamble at the top of the file
weather_mar2012 = pd.read_csv(url, skiprows=15, index_col='Date/Time', parse_dates=True, encoding='latin1', header=0)

weather_mar2012

weather_mar2012['Temp (°C)'].plot(figsize=(15, 5))  # plot the temperature over the month
plt.show()

weather_mar2012.columns = [
    u'Year', u'Month', u'Day', u'Time', u'Data Quality', u'Temp (°C)',
    u'Temp Flag', u'Dew Point Temp (°C)', u'Dew Point Temp Flag',
    u'Rel Hum (%)', u'Rel Hum Flag', u'Wind Dir (10s deg)', u'Wind Dir Flag',
    u'Wind Spd (km/h)', u'Wind Spd Flag', u'Visibility (km)', u'Visibility Flag',
    u'Stn Press (kPa)', u'Stn Press Flag', u'Hmdx', u'Hmdx Flag', u'Wind Chill',
    u'Wind Chill Flag', u'Weather']
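
# An alternative to retyping the whole header (a sketch): strip the degree signs
# programmatically, the same trick download_weather_month uses below. Left
# commented out here, since the 'Temp (°C)' references below expect the degree sign.
# weather_mar2012.columns = [col.replace('\xb0', '') for col in weather_mar2012.columns]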

# weather_mar2012 = weather_mar2012.dropna(axis=1, how='any')  # drop any column that contains a null value
weather_mar2012[:5]

# weather_mar2012 = weather_mar2012.drop(['Year', 'Month', 'Day', 'Time'], axis=1)

weather_mar2012

'''
    5.2 Plotting the temperature by hour of day
'''
temperatures = weather_mar2012[['Temp (°C)']].copy()
temperatures.head()

temperatures.loc[:, 'Hour'] = weather_mar2012.index.hour
temperatures.groupby('Hour').median().plot()  # median temperature for each hour of the day
plt.show()

'''
    5.3 Getting the whole year of data
'''

def download_weather_month(year, month):
    if month == 1:
        # quirk carried over from the original notebook: January is fetched from the following year
        year += 1
    url = url_template.format(year=year, month=month)
    # header=0, not header=True: read_csv expects a row number here
    weather_data = pd.read_csv(url, skiprows=15, index_col='Date/Time', parse_dates=True, header=0)
    weather_data = weather_data.dropna(axis=1)  # drop columns that contain nulls
    weather_data.columns = [col.replace('\xb0', '') for col in weather_data.columns]  # strip the degree signs
    weather_data = weather_data.drop(['Year', 'Day', 'Month', 'Time', 'Data Quality'], axis=1)
    return weather_data

download_weather_month(2012, 1)[:5]

data_by_month = [download_weather_month(2012, i) for i in range(1, 13)]  # all twelve months (range(1, 12) would stop at November)

weather_2012 = pd.concat(data_by_month)
weather_2012

'''
    5.4 Saving to a CSV
'''
weather_2012.to_csv('data/weather_2012.csv')

# ---------------------
# Chapter 6 - String Operations- Which month was the snowiest.ipynb
# ---------------------

weather_2012 = pd.read_csv('data/weather_2012.csv', encoding='latin1', index_col='Date/Time', parse_dates=True)
weather_2012.head()

'''
    6.1 String operations
'''
weather_description = weather_2012['Weather']  # pull out the Weather column

# is it snowing? .str exposes per-value string matching, replacement, and
# slicing, so contains('Snow') returns True/False for every row
is_snowing = weather_description.str.contains('Snow')

is_snowing = is_snowing.astype(float)  # convert the bools to 0.0/1.0 so they can be plotted

is_snowing.plot()  # show all the snowy stretches of the year
plt.show()

'''
    6.2 Use resampling to find the snowiest month
'''
'''
    The monthly median temperature can be computed with resample()
'''
# resample's how= keyword is gone in later pandas, so chain .median() instead
weather_2012['Temp (C)'].resample('M').median().plot(kind='bar')  # monthly median temperature
plt.show()

# encode the weather as 0/1: snow (True) becomes 1, anything else 0
is_snowing.astype(float)[:10]  # astype converts the bool values to floats

# use resample() to get the fraction of snowy observations in each month
is_snowing.astype(float).resample('M').mean()  # 'M' = month-end frequency

is_snowing.astype(float).resample('M').mean().plot(kind='bar')
plt.show()

'''
    6.3 Plotting temperature and snowiness stats together
'''
temperature = weather_2012['Temp (C)'].resample('M').median()  # monthly median temperature
is_snowing = weather_2012['Weather'].str.contains('Snow')
snowiness = is_snowing.astype(float).resample('M').mean()  # fraction of snowy observations per month

temperature.name = 'Temperature'
snowiness.name = 'Snowiness'

stats = pd.concat([temperature, snowiness], axis=1)
stats
stats.plot(kind='bar')  # the snowiness values are too small to see on the temperature scale, so this plot is not useful
plt.show()

stats.plot(kind='bar', subplots=True, figsize=(15, 10))  # separate subplots give each series its own scale, which displays both sensibly
plt.show()

# subplots=True draws each column in its own axes within a single figure

# ---------------------
# Chapter 7 - Cleaning up messy data.ipynb
# ---------------------

requests = pd.read_csv('data/311-service-requests.csv')
# requests.head()

'''
    7.1 How do we know if it's messy?
'''
zip_codes = requests['Incident Zip'].unique()  # unique() lists every distinct value; don't shadow the builtin zip
# zip_codes
'''
    Problems in the zip codes:
        1. Mixed types: some values are strings, some are floats
        2. Some values are missing (nan)
        3. Some values are wrong, e.g. 83 or 29616-0759
        4. Some markers pandas does not recognize as missing: 'N/A', 'NO CLUE'
    Fixes (implemented in 7.3 below):
        1. Normalize 'N/A' and 'NO CLUE' to a proper nan
        2. Read everything as strings
'''

'''
    7.3 Fixing the nan values and string/float confusion
'''
na_values = ['N/A', 'NO CLUE', '0', 'nan']  # '0' is the digit zero, not the letter O
requests = pd.read_csv('data/311-service-requests.csv', na_values=na_values, dtype={'Incident Zip': str})
# while reading the CSV, treat the markers above as missing and keep the zip column as strings
zip_codes = requests['Incident Zip'].copy()
# zip_codes.unique()

'''
    7.4 What's up with the dashes? (zips like 29616-0759)
'''
rows_with_dashes = requests['Incident Zip'].str.contains('-').fillna(False)  # flag every zip containing a dash
# len(requests[rows_with_dashes])
# requests[rows_with_dashes]

requests.loc[rows_with_dashes, 'Incident Zip'] = np.nan  # null out the dashed zips; .loc avoids chained assignment
# requests['Incident Zip'].unique()

long_zip_codes = requests['Incident Zip'].str.len() > 5
# requests['Incident Zip'][long_zip_codes].unique()

requests['Incident Zip'] = requests['Incident Zip'].str.slice(0, 5)  # .str.slice(0, 5) keeps the first five characters
# requests['Incident Zip'] = requests['Incident Zip'].str[0:5]
# requests['Incident Zip'].unique()

# requests[requests['Incident Zip'] == '00000']

zero_zips = requests['Incident Zip'] == '00000'
requests.loc[zero_zips, 'Incident Zip'] = np.nan

unique_zips = requests['Incident Zip'].unique()
unique_zips.sort()  # sort the array in place
unique_zips

zips = requests['Incident Zip']
is_close = zips.str.startswith('0') | zips.str.startswith('1')  # zips near NYC start with 0 or 1
is_far = ~(is_close) & zips.notnull()

zips[is_far]

requests[is_far][['Incident Zip', 'Descriptor', 'City']].sort_values('Incident Zip')  # .sort() was replaced by .sort_values() in later pandas

requests['City'].str.upper().value_counts()  # upper-case the city names, then count each city

'''
    7.5 Putting it together
'''
# handle the bad NA markers while reading the CSV
na_values = ['NO CLUE', 'N/A', '0']
requests = pd.read_csv('data/311-service-requests.csv',
                       na_values=na_values,
                       dtype={'Incident Zip': str})

# normalize the zip codes to at most five characters
def fix_zip_codes(zips):
    # truncate everything to length 5
    zips = zips.str.slice(0, 5)

    # null out the all-zero zips
    zero_zips = zips == '00000'
    zips[zero_zips] = np.nan

    return zips

requests['Incident Zip'] = fix_zip_codes(requests['Incident Zip'])
requests['Incident Zip'].unique()
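
# A quick sanity check (a sketch, not in the original): slicing guarantees at
# most five characters for every non-null zip.
cleaned = requests['Incident Zip'].dropna()
assert (cleaned.str.len() <= 5).all()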

# ---------------------
# Chapter 8 - How to deal with timestamps.ipynb
# ---------------------

'''
    8.1 Parsing Unix timestamps
'''
popcon = pd.read_csv('data/popularity-contest', sep=' ')
#  popcon.head()
popcon.columns = ['atime', 'ctime', 'package-name', 'mru-program', 'tag']
#  popcon[:5]
popcon['atime'].dtype

popcon['atime'] = popcon['atime'].astype(int)
popcon['ctime'] = popcon['ctime'].astype(int)
#  popcon['atime'][:5]
# the values are seconds since the Unix epoch, so pass unit='s'
popcon['atime'] = pd.to_datetime(popcon['atime'], unit='s')
popcon['ctime'] = pd.to_datetime(popcon['ctime'], unit='s')  # ctime is needed for the chronological sort below
#  popcon['atime'][:5]

popcon = popcon[popcon['atime'] > '1970-01-01']
nonlibraries = popcon[~popcon['package-name'].str.contains('lib')]
nonlibraries.sort_values('ctime', ascending=False)[:10]  # .sort() was replaced by .sort_values() in later pandas

# ---------------------
# Chapter 9 - Loading data from SQL databases.ipynb
# ---------------------

import sqlite3
import pandas as pd
"""
    pandas can read from HTML,JSON,SQL,EXCEL,HDF5,Stata, and a few other things.

    Read data from a SQL database using the pd.read_sql function.

    read_sql take 2 arguments: a SELECT statement, and s database connection object.

    This is great because it means you can read from any kind of SQL database,
    it doesn't matter if it's MySQL,SQLite,PostgreSQL,or something else.
"""
"""
    9.1 Reading data from SQL databases
"""
con = sqlite3.connect('../data/weather_2012.sqlite')
df = pd.read_sql('select * from weather_2012 LIMIT 3', con, index_col='id')  # use the id column as the index
#  df
df = pd.read_sql('select * from weather_2012 LIMIT 3', con, index_col=['id', 'date_time'])  # use a two-level (id, date_time) index
#  df

"""
    9.2 Writing to a SQLite database
"""
# weather_df = pd.read_csv('../data/weather_2012.csv')
# con = sqlite3.connect('../data/test_db.sqlite')
# con.execute('drop table if exists weather_2012')
# weather_df.to_sql('weather_2012', con)

con = sqlite3.connect('../data/test_db.sqlite')
df = pd.read_sql('select * from weather_2012 LIMIT 3', con, index_col='index')
#  df

con = sqlite3.connect('../data/test_db.sqlite')
df = pd.read_sql('select * from weather_2012 order by Weather LIMIT 3', con)
df
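
# read_sql also accepts query parameters (a sketch; sqlite3 uses '?' placeholders),
# which is safer than formatting values into the SQL string yourself.
pd.read_sql('select * from weather_2012 where Weather = ? LIMIT 3', con, params=('Snow',))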

"""
    sqlite3 database:连接数据库-->sqlite3.connect()
    PostgreSQL database:连接数据库-->psycopg2.connect()
    MySQL database:连接数据库-->MySQLdb.connect()
"""

"""
    9.3 Connecting to other kinds of database
"""
import MySQLdb
con = MySQLdb.connect(host='localhost', db='test')

import psycopg2
con = psycopg2.connect(host='localhost')
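
# Once connected, the read pattern is identical regardless of backend (a sketch;
# 'some_table' is a placeholder and both connections above assume a running server):
# df = pd.read_sql('SELECT * FROM some_table LIMIT 5', con)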