Opening an sqlite database

In [1]:
# Open a connection to the SQLite "fine foods" review database.
# NOTE(review): hardcoded absolute path — adjust for your environment.
import sqlite3
dbfilename = "/opt/data/finefoods.db"
dbcon = sqlite3.connect(dbfilename)

SQL query.

In [2]:
# Count low-score reviews (reviewscore < 2), grouped by their "helpful"
# vote ratio (values look like "1/2" in the output below).
cursor = dbcon.cursor()
sql = """
SELECT reviewhelpful, count(*)
FROM review
WHERE reviewscore < 2
GROUP BY reviewhelpful
"""
# execute() returns the cursor itself; rows are pulled later.
cursor.execute(sql)
Out[2]:
<sqlite3.Cursor at 0x7fa2bf71d7a0>

Results can then be pulled from the database.

In [3]:
from collections import Counter

# Drain the remaining (reviewhelpful, count) rows from the cursor.
# The original looped with an unused `enumerate` index (`row_n`);
# feeding the 2-tuples straight into a dict/Counter is equivalent.
ct = Counter(dict(cursor))
print(ct.most_common(n=3))
[('0/0', 12266), ('1/1', 4809), ('0/1', 3701)]

Some of the Python-side post-processing can be pushed back to the database

In [4]:
# Push the post-processing into the database: alias the aggregate
# and let SQLite do the descending sort.
cursor = dbcon.cursor()
sql = """
SELECT reviewhelpful, count(*) AS ct
FROM review
WHERE reviewscore < 2
GROUP BY reviewhelpful
ORDER BY ct DESC
"""
cursor.execute(sql)
print(cursor.fetchmany(5))
[('0/0', 12266), ('1/1', 4809), ('0/1', 3701), ('1/2', 2718), ('2/2', 2123)]
In [5]:
# Same aggregation, restricted to "frequent" reviewers (more than 5
# reviews overall).  The original nested two sub-selects just to filter
# on the aggregate; a single GROUP BY ... HAVING yields the same rows.
sql = """
select reviewhelpful, count(*)
from (select reviewername
      from review
      group by reviewername
      having count(*) > 5) as U
inner join review
on U.reviewername = review.reviewername
where reviewscore < 2
group by reviewhelpful
"""
cursor.execute(sql)
Out[5]:
<sqlite3.Cursor at 0x7fa2bf71d880>

Opening the same database using an ORM (SQLAlchemy).

In [6]:
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine

# Declarative base whose classes will be generated by reflecting the DB.
Base = automap_base()

# Engine over the same SQLite file (four slashes: sqlite:/// + absolute path).
engine = create_engine("sqlite:////opt/data/finefoods.db")

Use reflection on the SQL side to create the objects from the database.

In [7]:
# Reflect the schema and auto-generate one mapped class per table.
# NOTE(review): `reflect=True` is deprecated in SQLAlchemy 1.4+/2.x in
# favour of Base.prepare(autoload_with=engine) — confirm installed version.
Base.prepare(engine, reflect=True)
review = Base.classes.review

Make a query using SQLAlchemy's methods.

In [8]:
session = Session(engine)
from sqlalchemy import func # SQL functions

# Number of low-score reviews per "helpful" ratio, most frequent first.
# Naming the aggregate once keeps the two usages visibly in sync.
helpful_count = func.count(review.reviewhelpful)
query = (session
         .query(review.reviewhelpful, helpful_count)
         .filter(review.reviewscore < 2)
         .group_by(review.reviewhelpful)
         .order_by(helpful_count.desc()))

res = query.all()
res[:3]
Out[8]:
[('0/0', 12266), ('1/1', 4809), ('0/1', 3701)]

How were we doing before StackOverflow?


Function composition is generating SQL code.

In [9]:
from sqlalchemy.dialects import sqlite

# Show the SQL that the composed query object will emit for SQLite.
compiled = query.statement.compile(dialect=sqlite.dialect())
print(compiled)
SELECT review.reviewhelpful, count(review.reviewhelpful) AS count_1 
FROM review 
WHERE review.reviewscore < ? GROUP BY review.reviewhelpful ORDER BY count(review.reviewhelpful) DESC

With dplyr, an SQL table is a data table.

In [10]:
from rpy2.robjects import r

# Build the R/dplyr script with a format placeholder rather than the
# original three-way string concatenation — the injected path is easier
# to spot and the resulting string is identical.
# NOTE(review): still breaks if the path contains a single quote.
r_code = """
suppressMessages(require("dplyr"))
dbfilename <- '{0}'
datasrc <- src_sqlite(dbfilename)
review_tbl <- tbl(datasrc, "review")

res <- filter(review_tbl,
              reviewscore < 2) %>%
       count(reviewhelpful) %>%
       arrange('desc(n)')
""".format(dbfilename)

res = r(r_code)
print(res)
print(res)
Source: sqlite 3.8.6 [/opt/data/finefoods.db]
From: <derived table> [?? x 2]
Arrange: desc(n) 

   reviewhelpful     n
           (chr) (int)
1            0/0 12266
2            0/1  3701
3           0/10    55
4           0/11    66
5           0/12    73
6           0/13    32
7           0/14     8
8           0/15    19
9           0/16    13
10          0/17    15
..           ...   ...


We traded a little knowledge of SQL for a little knowledge of R.

dplyr is not trying to map objects. It is focusing on databases as sources of tables.

In [11]:
from rpy2.robjects.lib import dplyr


# Wrap the SQLite file as a dplyr data source and fetch the review table.
datasrc  = dplyr.src_sqlite(dbfilename)
review_tbl = datasrc.get_table("review")

The table can be queried using the dplyr interface.

In [12]:
# Same aggregation as the raw-SQL version, expressed as dplyr verbs.
# Each step names its intermediate lazy table.
low_score = review_tbl.filter('reviewscore < 2')
per_helpful = low_score.count('reviewhelpful')
res = per_helpful.arrange('desc(n)')

print(res)
Source: sqlite 3.8.6 [/opt/data/finefoods.db]
From: <derived table> [?? x 2]
Arrange: desc(n) 

   reviewhelpful     n
           (chr) (int)
1            0/0 12266
2            1/1  4809
3            0/1  3701
4            1/2  2718
5            2/2  2123
6            0/2  1702
7            2/3  1579
8            1/3  1342
9            3/3  1104
10           2/4   921
..           ...   ...

Strings are snippets of R code for dplyr.

R can be considered a domain-specific language (DSL) in the Python code.


The R package ggplot2 can also be used.

In [13]:
from rpy2.robjects import r, globalenv
import rpy2.robjects.lib.ggplot2 as gg

# R snippet: extract field %i of the "a/b" helpfulness ratio as integer.
split_review = \
    'as.integer(sapply(strsplit(reviewhelpful, "/"), "[", %i))'

# Materialize the lazy table, then derive the two vote counts r1/r2.
votes = res.collect().mutate(r1 = split_review % 1,
                             r2 = split_review % 2)

p = (gg.ggplot(votes) +
     gg.aes_string(x='r1+1', y='r2+1', size='n') +
     gg.geom_point(alpha = 0.5) +
     gg.scale_x_log10("review voted helpful") +
     gg.scale_y_log10("review") +
     gg.scale_size(trans="sqrt"))

Sending the resulting figure to a jupyter notebook output.

In [14]:
from rpy2.robjects.lib import grdevices

# Render the ggplot to an in-memory PNG via an R graphics device.
with grdevices.render_to_bytesio(grdevices.png,
                                 width=800,
                                 height=600,
                                 res=120) as b:
    p.plot()

# Embed the PNG bytes directly into the notebook output.
from IPython.display import Image, display
display(Image(data=b.getvalue(), format='png', embed=True))

In [15]:
# The original imported `figure` twice; a single import suffices.
from bokeh.plotting import figure, show, output_notebook
output_notebook()

plot = figure()
res =  (review_tbl
        .filter('reviewscore < 2')
        .count('reviewhelpful')
        .collect())

import math
# Split each "a/b" ratio once, instead of two passes over the column.
helpful = [x.split('/') for x in res.rx2('reviewhelpful')]
plot.scatter([int(pair[0]) for pair in helpful],
             [int(pair[1]) for pair in helpful],
             # NOTE(review): log10(n/100) is negative for n < 100 —
             # presumably intentional scaling, but confirm the radii render.
             radius=[math.log10(n / 100) for n in res.rx2('n')],
             fill_alpha=.3)
Loading BokehJS ...
Out[15]:
<bokeh.models.renderers.GlyphRenderer at 0x7fa2d47d5e48>
In [16]:
# Display the bokeh figure in the notebook output area.
show(plot)
Out[16]:

<Bokeh Notebook handle for In[16]>


Spark can be started from regular Python code.

In [17]:
import findspark
findspark.init()

import pyspark

# Local 2-core Spark context with 2 GB of executor memory.
# SparkConf setters mutate the object in place, so chaining is optional.
conf = pyspark.conf.SparkConf()
conf.setMaster('local[2]')
conf.setAppName('ipython-notebook')
conf.set("spark.executor.memory", "2g")

sc = pyspark.SparkContext(conf=conf)
In [18]:
from pyspark.sql import SQLContext, Row
sqlcontext = SQLContext(sc)

# Pull a sample of rows out of SQLite and hand them to Spark, using the
# cursor's column metadata as the schema.
cursor.execute('select * from review limit 10000')
colnames = tuple(col[0] for col in cursor.description)
review = sqlcontext.createDataFrame(cursor, colnames)
review.registerTempTable("review")

SQL can be used to query the data.

In [19]:
# The same aggregation, now executed by Spark SQL over the temp table.
sql = """
SELECT reviewhelpful, count(*) AS ct
FROM review
WHERE reviewscore < 2
GROUP BY reviewhelpful
ORDER BY ct DESC
"""
counts = sqlcontext.sql(sql)

The evaluation is only performed when the results are needed.

In [20]:
# collect() triggers the lazy computation and brings results to the driver.
res = counts.collect()
res[:3]
Out[20]:
[Row(reviewhelpful='0/0', ct=241),
 Row(reviewhelpful='1/1', ct=87),
 Row(reviewhelpful='0/1', ct=80)]

Map/reduce is at the heart of Spark.

In [21]:
# Classic word count over the review summaries, as map/reduce stages.
words = review.flatMap(lambda row: row.reviewsummary.split())
pairs = words.map(lambda w: (w.lower(), 1))
names = pairs.reduceByKey(lambda total, n: total + n)
names.takeOrdered(15, key=lambda item: -item[1])
Out[21]:
[('great', 1259),
 ('the', 914),
 ('good', 837),
 ('for', 706),
 ('a', 695),
 ('not', 624),
 ('and', 581),
 ('best', 568),
 ('my', 451),
 ('coffee', 438),
 ('but', 401),
 ('love', 395),
 ('of', 391),
 ('to', 371),
 ('is', 350)]

In [22]:
# Word count restricted to the low-score reviews only.
sql = """
SELECT *
FROM review
WHERE reviewscore < 2
"""
lowscore = sqlcontext.sql(sql)

summaries = lowscore.flatMap(lambda row: row.reviewsummary.split())
names = (summaries
         .map(lambda w: (w.lower(), 1))
         .reduceByKey(lambda total, n: total + n))
names.takeOrdered(8, key=lambda item: -item[1])
Out[22]:
[('not', 132),
 ('the', 92),
 ('is', 50),
 ('of', 48),
 ('this', 46),
 ('for', 44),
 ('i', 43),
 ('a', 42)]

In [23]:
# Plot the 8 most frequent words as a bar chart with ggplot2.
lst = names.takeOrdered(8, key=lambda item: -item[1])
from rpy2.robjects.vectors import StrVector, IntVector
# Unzip the (word, count) pairs into two parallel sequences.
words, counts = zip(*lst)
dataf = dplyr.DataFrame({'word': StrVector(words),
                         'count': IntVector(counts)})
p = (gg.ggplot(dataf) +
     gg.geom_bar(gg.aes_string(x='word', y='count'),
                 stat="identity"))

with grdevices.render_to_bytesio(grdevices.png,
                                 width=800,
                                 height=600,
                                 res=120) as b:
    p.plot()
display(Image(data=b.getvalue(), format='png', embed=True))