Opening an SQLite database.
# Open a connection to the local SQLite database using the stdlib driver.
import sqlite3
dbfilename = "/opt/data/finefoods.db"
dbcon = sqlite3.connect(dbfilename)
SQL query.
# Count low-score reviews (reviewscore < 2) grouped by their
# "helpful" vote ratio; results are fetched from the cursor later.
cursor = dbcon.cursor()
sql = """
SELECT reviewhelpful, count(*)
FROM review
WHERE reviewscore < 2
GROUP BY reviewhelpful
"""
cursor.execute(sql)
Results can then be pulled from the database.
from collections import Counter

# Fold the (reviewhelpful, count) rows from the executed cursor into a
# Counter so the most frequent "helpful" ratios can be listed.
# Fix: the original wrapped the cursor in enumerate() but never used the
# row number, so the iteration is done directly.
ct = Counter()
for score, count in cursor:
    ct[score] = count
print(ct.most_common(n=3))
Some of the Python-side post-processing can be pushed back to the database
# Same aggregation as above, but the ranking previously done in Python
# (Counter.most_common) is pushed into the database with ORDER BY.
cursor = dbcon.cursor()
sql = """
SELECT reviewhelpful, count(*) AS ct
FROM review
WHERE reviewscore < 2
GROUP BY reviewhelpful
ORDER BY ct DESC
"""
cursor.execute(sql)
print(cursor.fetchmany(5))
# Restrict the aggregation to "prolific" reviewers:
# - inner-most query: number of reviews per reviewer
# - middle query (T -> U): reviewers with more than 5 reviews
# - outer query: their low-score reviews, grouped by helpfulness
sql = """
select reviewhelpful, count(*)
from (select T.reviewername
from (select reviewername, count(*) as reviewer_count
from review
group by reviewername) as T
where T.reviewer_count > 5) as U
inner join review
on U.reviewername = review.reviewername
where reviewscore < 2
group by reviewhelpful
"""
cursor.execute(sql)
Opening the same database using an ORM (SQLAlchemy).
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine
# automap generates mapped classes from the database schema at runtime.
Base = automap_base()
# engine, suppose it has two tables 'user' and 'address' set up
engine = create_engine("sqlite:////opt/data/finefoods.db")
Use reflection on the SQL side to create the objects from the database.
# Reflect the schema and build one mapped class per table.
# NOTE(review): prepare(engine, reflect=True) is the pre-1.4 API; recent
# SQLAlchemy uses Base.prepare(autoload_with=engine) — confirm version.
Base.prepare(engine, reflect=True)
review = Base.classes.review
Make a query using SQLAlchemy's methods.
session = Session(engine)
from sqlalchemy import func # SQL functions

# Same aggregation as the raw-SQL version, expressed by composing query
# methods one step at a time. The count expression is named once and
# reused for both the selected column and the descending sort.
n_helpful = func.count(review.reviewhelpful)
query = session.query(review.reviewhelpful, n_helpful)
query = query.filter(review.reviewscore < 2)
query = query.group_by(review.reviewhelpful)
query = query.order_by(n_helpful.desc())
res = query.all()
res[:3]
How were we doing before StackOverflow?
Function composition is generating SQL code.
from sqlalchemy.dialects import sqlite
# Show the SQLite SQL that the composed query object will emit.
print(str(query.statement.compile(dialect=sqlite.dialect())))
With dplyr, an SQL table is a data table.
from rpy2.robjects import r

# Build and evaluate an R/dplyr pipeline over the same SQLite table.
# Splicing dbfilename into the R source is acceptable here because the
# path is a local constant, not untrusted input.
# Fix: the original R code used arrange('desc(n)'); in raw R, arrange()
# takes unquoted expressions, and a quoted string sorts by a constant
# (i.e. does not sort at all). The unquoted form arrange(desc(n)) is used.
r_code = """
suppressMessages(require("dplyr"))
dbfilename <- '""" + dbfilename + """'
datasrc <- src_sqlite(dbfilename)
review_tbl <- tbl(datasrc, "review")
res <- filter(review_tbl,
reviewscore < 2) %>%
count(reviewhelpful) %>%
arrange(desc(n))
"""
res = r(r_code)
print(res)
We traded the knowledge of SQL for a little knowledge of R.
dplyr is not trying to map objects. It is focusing on databases as sources of tables.
from rpy2.robjects.lib import dplyr
# Connect through rpy2's dplyr wrapper; tables are fetched by name.
# NOTE(review): src_sqlite() is deprecated in recent dplyr/dbplyr —
# confirm the installed R package version still provides it.
datasrc = dplyr.src_sqlite(dbfilename)
review_tbl = datasrc.get_table("review")
The table can be queried using the dplyr interface.
# Same pipeline as the inline R code, composed from Python. The string
# arguments are snippets of R code evaluated by dplyr (see note below).
res = (review_tbl
.filter('reviewscore < 2')
.count('reviewhelpful')
.arrange('desc(n)'))
print(res)
Strings are snippets of R code for dplyr.
R can be considered a domain-specific language (DSL) in the Python code.
The R package ggplot2 can also be used.
from rpy2.robjects import r, globalenv
import rpy2.robjects.lib.ggplot2 as gg
# R snippet template: splits "reviewhelpful" on "/" and takes the %i-th
# element (1-based in R) as an integer — presumably "votes helpful" /
# "votes total"; confirm against the data.
split_review = \
'as.integer(sapply(strsplit(reviewhelpful, "/"), "[", %i))'
# collect() materializes the lazy dplyr result before mutate() adds the
# r1 / r2 columns; both axes are shifted by 1 to keep log10 defined at 0.
p = (gg.ggplot((res.collect().
mutate(r1 = split_review % 1,
r2 = split_review % 2))) +
gg.aes_string(x='r1+1', y='r2+1', size='n') +
gg.geom_point(alpha = 0.5) +
gg.scale_x_log10("review voted helpful") +
gg.scale_y_log10("review") +
gg.scale_size(trans="sqrt"))
Sending the resulting figure to a Jupyter notebook output.
from rpy2.robjects.lib import grdevices
# Render the ggplot figure into an in-memory PNG (no file on disk) and
# embed the bytes in the notebook output.
with grdevices.render_to_bytesio(grdevices.png,
width=800,
height=600,
res=120) as b:
p.plot()
from IPython.display import Image, display
data = b.getvalue()
display(Image(data=data, format='png', embed=True))
from bokeh.plotting import figure, show, output_notebook

# Route bokeh output to the notebook.
# Fix: the original imported `figure` twice on consecutive lines; the
# redundant import is dropped.
output_notebook()
plot = figure()
# Re-run the dplyr pipeline and materialize the rows locally.
res = (review_tbl
       .filter('reviewscore < 2')
       .count('reviewhelpful')
       .collect())
import math
# "reviewhelpful" strings look like "a/b"; split each value once instead
# of re-iterating the R vector (and re-splitting) per coordinate.
pairs = [x.split('/') for x in res.rx2('reviewhelpful')]
plot.scatter([int(p[0]) for p in pairs],
             [int(p[1]) for p in pairs],
             # NOTE(review): log10(x/100) is negative for counts < 100 —
             # confirm the data guarantees n >= 100, or radii go negative.
             radius=[math.log10(x/100) for x in res.rx2('n')],
             fill_alpha=.3)
show(plot)
Spark can be started from regular Python code.
import findspark
findspark.init()
import pyspark
# Local Spark: 2 worker threads, 2g of executor memory.
conf = pyspark.conf.SparkConf()
(conf.setMaster('local[2]')
.setAppName('ipython-notebook')
.set("spark.executor.memory", "2g"))
sc = pyspark.SparkContext(conf=conf)
from pyspark.sql import SQLContext, Row
# NOTE(review): SQLContext is superseded by SparkSession in Spark 2+ —
# confirm the Spark version in use.
sqlcontext = SQLContext(sc)
# Load a sample of rows from SQLite into a Spark DataFrame; column names
# come from the cursor's description.
cursor.execute('select * from review limit 10000')
review = \
sqlcontext.createDataFrame(cursor,
tuple(x[0] for x in cursor.description))
# NOTE(review): this rebinds the name "review" (previously the SQLAlchemy
# mapped class) to a Spark DataFrame.
review.registerTempTable("review")
SQL can be used to query the data.
# The familiar aggregation, now executed by Spark SQL over the temp table.
sql = """
SELECT reviewhelpful, count(*) AS ct
FROM review
WHERE reviewscore < 2
GROUP BY reviewhelpful
ORDER BY ct DESC
"""
counts = sqlcontext.sql(sql)
The evaluation is only performed when the results are needed.
# collect() triggers the lazy evaluation and returns the rows locally.
res = counts.collect()
res[:3]
Map/reduce is at the heart of Spark.
# Word count over review summaries: split into words, lowercase, then
# sum the per-word 1s with reduceByKey; takeOrdered lists the top 15.
# NOTE(review): flatMap directly on a DataFrame only works in Spark 1.x;
# Spark 2+ requires review.rdd.flatMap(...) — confirm the version.
names = (review
.flatMap(lambda p: p.reviewsummary.split())
.map(lambda word: (word.lower(), 1))
.reduceByKey(lambda a, b: a+b))
names.takeOrdered(15, key = lambda x: -x[1])
# Same word count, restricted to low-score reviews selected via Spark SQL.
sql = """
SELECT *
FROM review
WHERE reviewscore < 2
"""
lowscore = sqlcontext.sql(sql)
names = (lowscore
         .flatMap(lambda p: p.reviewsummary.split())
         .map(lambda word: (word.lower(), 1))
         .reduceByKey(lambda a, b: a+b))
# takeOrdered() launches a Spark job; run it once and keep the result.
# Fix: the original executed the identical takeOrdered(8, ...) twice —
# first as a bare (discarded) expression, then again to bind `lst`.
lst = names.takeOrdered(8, key = lambda x: -x[1])
from rpy2.robjects.vectors import StrVector, IntVector
# Hand the (word, count) pairs from Spark over to R as a data frame.
dataf = dplyr.DataFrame({'word': StrVector([x[0] for x in lst]),
'count': IntVector([x[1] for x in lst])})
# Bar chart of the counts; stat="identity" plots the y values as given.
p = (gg.ggplot(dataf) +
gg.geom_bar(gg.aes_string(x='word', y='count'),
stat="identity"))
# Render to an in-memory PNG and embed it in the notebook, as before.
with grdevices.render_to_bytesio(grdevices.png,
width=800,
height=600,
res=120) as b:
p.plot()
display(Image(data=b.getvalue(), format='png', embed=True))