I am here as an individual, long-active Open Source developer and data scientist. Views and opinions expressed are solely my own.
docker run --rm -it -p 8888:8888 \
rpy2/odsc-pdaibp-slides
Visit http://localhost:8888/ *
*: If using docker-machine (Windows, OS X), the address is the output of:
docker-machine ip [MACHINE]
C | C++ | bash | JavaScript | Julia | Python | R | Scala | SQL
Julia        ⇒  Python  :  PyCall
Julia        ⇒  R       :  Rif or RCall
[somewhere]  ⇒  R       :  Rserve
R            ⇒  Python  :  rPython
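These slides focus on the Python ⇒ R direction, using rpy2. A minimal sketch of what such a bridge looks like (the R expressions are only illustrative):

# Python ⇒ R with rpy2: evaluate R code and pull results back into Python
from rpy2.robjects import r

# run an R expression; the result comes back as an R vector usable from Python
res = r('summary(rnorm(100))')
print(res)

# R functions can also be called like Python callables
r_sum = r['sum']
print(r_sum(r('1:10'))[0])  # 55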
Polyglot data analysis: a project/ensemble involving several of these languages.
Detailed account of raw materials and workdays for a basketry workshop. Clay, ca. 2040 BC (Ur III).
Source: Wikipedia, Marie-Lan Nguyen (2006)
J. McAuley and J. Leskovec. From amateurs to connoisseurs: modeling the evolution of user expertise through online reviews. WWW, 2013. http://snap.stanford.edu/data/web-FineFoods.html
product/productId: B00...W
review/userId: A1S...MSMR
review/profileName: [some profile name]
review/helpfulness: 0/0
review/score: 1.0
review/time: 1344556800
review/summary: Sad outcome
review/text: Five minutes in, one tentacle was bitten off, ball inside cracked in half. Not durable enough to be a dog toy. Disappointed :(. So is the dog :(.
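The raw dump is a sequence of such key/value blocks. A minimal parsing sketch (the helper name and the block layout are illustrative; the slides load the data from SQLite instead):

def parse_record(block):
    # Turn one "key: value" block of the raw dump into a Python dict.
    record = {}
    for line in block.splitlines():
        key, _, value = line.partition(': ')
        if key:
            record[key] = value
    return record

example = """product/productId: B00...W
review/helpfulness: 0/0
review/score: 1.0"""
print(parse_record(example)['review/helpfulness'])  # '0/0'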
column         type
-------------  -------
productid      VARCHAR
reviewerid     VARCHAR
reviewername   VARCHAR
reviewhelpful  VARCHAR
reviewscore    INTEGER
reviewtime     INTEGER
reviewsummary  VARCHAR
reviewtext     VARCHAR
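A table with that layout could be declared with DDL along these lines (a sketch; the constraints and indexes of the actual finefoods.db are not shown in the slides):

import sqlite3

ddl = """
CREATE TABLE review (
  productid     VARCHAR,
  reviewerid    VARCHAR,
  reviewername  VARCHAR,
  reviewhelpful VARCHAR,
  reviewscore   INTEGER,
  reviewtime    INTEGER,
  reviewsummary VARCHAR,
  reviewtext    VARCHAR
)
"""
# throwaway in-memory database with the same layout
demo_con = sqlite3.connect(":memory:")
demo_con.execute(ddl)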
import sqlite3
dbfilename = "/opt/data/finefoods.db"
dbcon = sqlite3.connect(dbfilename)
cursor = dbcon.cursor()
sql = """
SELECT reviewhelpful, count(*)
FROM review
WHERE reviewscore < 2
GROUP BY reviewhelpful
"""
cursor.execute(sql)
<sqlite3.Cursor at 0x7fd01c3be7a0>
from collections import Counter
ct = Counter()
for row_n, (score, count) in enumerate(cursor, 1):
    ct[score] = count
print(ct.most_common(n=3))
[('0/0', 12266), ('1/1', 4809), ('0/1', 3701)]
sql = """
select reviewhelpful, count(*)
from (select T.reviewername
      from (select reviewername, count(*) as reviewer_count
            from review
            group by reviewername) as T
      where T.reviewer_count > 5) as U
inner join review
on U.reviewername = review.reviewername
where reviewscore < 2
group by reviewhelpful
"""
cursor.execute(sql)
<sqlite3.Cursor at 0x7fd01c3be880>
result = (Review
          .select()
          .where(Review.reviewscore < 2))
(example of an ORM-style query, written with the peewee ORM; a model sketch follows)
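For that query to run, a model class mapped to the existing table is needed. A minimal sketch with peewee, with columns taken from the schema above (the Meta settings are assumptions about the database, not code from the slides):

from peewee import (SqliteDatabase, Model,
                    CharField, IntegerField)

db = SqliteDatabase('/opt/data/finefoods.db')

class Review(Model):
    # columns mirror the 'review' table shown earlier
    productid = CharField()
    reviewerid = CharField()
    reviewername = CharField()
    reviewhelpful = CharField()
    reviewscore = IntegerField()
    reviewtime = IntegerField()
    reviewsummary = CharField()
    reviewtext = CharField()

    class Meta:
        database = db
        table_name = 'review'  # 'db_table' with peewee 2.x
        primary_key = False    # assuming the table has no explicit primary key

# the query stays lazy until iterated or counted
low = Review.select().where(Review.reviewscore < 2)
print(low.count())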
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine
Base = automap_base()
# reflect the tables already present in the SQLite database (here: 'review')
engine = create_engine("sqlite:////opt/data/finefoods.db")
Base.prepare(engine, reflect=True)
review = Base.classes.review
session = Session(engine)
from sqlalchemy import func # SQL functions
query = (session
         .query(review.reviewhelpful,
                func.count(review.reviewhelpful))
         .filter(review.reviewscore < 2)
         .group_by(review.reviewhelpful)
         .order_by(func.count(review.reviewhelpful).desc()))
res = query.all()
res[:3]
[('0/0', 12266), ('1/1', 4809), ('0/1', 3701)]
from sqlalchemy.dialects import sqlite
print(str(query.statement.compile(dialect=sqlite.dialect())))
SELECT review.reviewhelpful, count(review.reviewhelpful) AS count_1
FROM review
WHERE review.reviewscore < ?
GROUP BY review.reviewhelpful
ORDER BY count(review.reviewhelpful) DESC
mutate     modify/add column
filter     filter rows
select     select columns
group_by   group rows
summarize  summarize groups of rows
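The same verbs are reachable from Python through rpy2's dplyr wrapper. A minimal sketch on R's built-in mtcars dataset (dataset and derived column names are only for illustration):

from rpy2.robjects.packages import importr, data
from rpy2.robjects.lib.dplyr import DataFrame

datasets = importr('datasets')
mtcars = data(datasets).fetch('mtcars')['mtcars']

res = (DataFrame(mtcars)
       .filter('gear > 3')
       .mutate(powertoweight='hp*36/wt')
       .group_by('gear')
       .summarize(mean_ptw='mean(powertoweight)'))
print(res)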
library(dplyr)
dbfilename <- "/opt/data/finefoods.db"
datasrc <- src_sqlite(dbfilename)
review_tbl <- tbl(datasrc, "review")
res <- filter(review_tbl,
              reviewscore < 2) %>%
  count(reviewhelpful) %>%
  arrange(desc(n))
from rpy2.robjects.lib import dplyr
datasrc = dplyr.src_sqlite(dbfilename)
review_tbl = datasrc.get_table("review")
res = (review_tbl
       .filter('reviewscore < 2')
       .count('reviewhelpful')
       .arrange('desc(n)'))
print(res)
Source: sqlite 3.8.6 [/opt/data/finefoods.db]
From: <derived table> [?? x 2]
Arrange: desc(n)

   reviewhelpful     n
           (chr) (int)
1            0/0 12266
2            1/1  4809
3            0/1  3701
4            1/2  2718
5            2/2  2123
6            0/2  1702
7            2/3  1579
8            1/3  1342
9            3/3  1104
10           2/4   921
..           ...   ...
from rpy2.robjects import r, globalenv
import rpy2.robjects.lib.ggplot2 as gg
split_review = \
    'as.integer(sapply(strsplit(reviewhelpful, "/"), "[", %i))'
p = (gg.ggplot((res.collect()
                .mutate(r1=split_review % 1,
                        r2=split_review % 2))) +
     gg.aes_string(x='r1+1', y='r2+1', size='n') +
     gg.geom_point(alpha=0.5) +
     gg.scale_x_log10("review voted helpful") +
     gg.scale_y_log10("review") +
     gg.scale_size(trans="sqrt"))
from rpy2.robjects.lib import grdevices
with grdevices.render_to_bytesio(grdevices.png,
                                 width=800,
                                 height=600,
                                 res=120) as b:
    p.plot()
from IPython.display import Image, display
data = b.getvalue()
display(Image(data=data, format='png', embed=True))
from bokeh.plotting import figure, show, output_notebook
output_notebook()
plot = figure()
res = (review_tbl
       .filter('reviewscore < 2')
       .count('reviewhelpful')
       .collect())

import math
plot.scatter(list(int(x.split('/')[0])
                  for x in res.rx2('reviewhelpful')),
             list(int(x.split('/')[1])
                  for x in res.rx2('reviewhelpful')),
             radius=list(math.log10(x/100) for x in res.rx2('n')),
             fill_alpha=.3)
show(plot)
<Bokeh Notebook handle for In[16]>
import findspark
findspark.init()
import pyspark
conf = pyspark.conf.SparkConf()
(conf.setMaster('local[2]')
     .setAppName('ipython-notebook')
     .set("spark.executor.memory", "2g"))
sc = pyspark.SparkContext(conf=conf)
from pyspark.sql import SQLContext, Row
sqlcontext = SQLContext(sc)
cursor.execute('select * from review limit 10000')
review = \
    sqlcontext.createDataFrame(cursor,
                               tuple(x[0] for x in cursor.description))
review.registerTempTable("review")
The data is now in a Spark DataFrame and can be distributed over a Spark cluster.
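A quick way to check (a sketch; cache() and getNumPartitions() are standard Spark DataFrame/RDD methods, not part of the original example):

# keep the DataFrame in memory for the repeated queries below
review.cache()
# number of partitions the data is split into
print(review.rdd.getNumPartitions())
# trigger an action so the cache is populated
print(review.count())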
sql = """
SELECT reviewhelpful, count(*) AS ct
FROM review
WHERE reviewscore < 2
GROUP BY reviewhelpful
ORDER BY ct DESC
"""
counts = sqlcontext.sql(sql)
res = counts.collect()
res[:3]
[Row(reviewhelpful='0/0', ct=241), Row(reviewhelpful='1/1', ct=87), Row(reviewhelpful='0/1', ct=80)]
names = (review
         .flatMap(lambda p: p.reviewsummary.split())
         .map(lambda word: (word.lower(), 1))
         .reduceByKey(lambda a, b: a + b))
names.takeOrdered(15, key=lambda x: -x[1])
[('great', 1259), ('the', 914), ('good', 837), ('for', 706), ('a', 695), ('not', 624), ('and', 581), ('best', 568), ('my', 451), ('coffee', 438), ('but', 401), ('love', 395), ('of', 391), ('to', 371), ('is', 350)]
sql = """
SELECT *
FROM review
WHERE reviewscore < 2
"""
lowscore = sqlcontext.sql(sql)
names = (lowscore
         .flatMap(lambda p: p.reviewsummary.split())
         .map(lambda word: (word.lower(), 1))
         .reduceByKey(lambda a, b: a + b))
names.takeOrdered(8, key=lambda x: -x[1])
[('not', 132), ('the', 92), ('is', 50), ('of', 48), ('this', 46), ('for', 44), ('i', 43), ('a', 42)]
lst = names.takeOrdered(8, key=lambda x: -x[1])
from rpy2.robjects.vectors import StrVector, IntVector
dataf = dplyr.DataFrame({'word': StrVector([x[0] for x in lst]),
                         'count': IntVector([x[1] for x in lst])})
p = (gg.ggplot(dataf) +
     gg.geom_bar(gg.aes_string(x='word', y='count'),
                 stat="identity"))
with grdevices.render_to_bytesio(grdevices.png,
                                 width=800,
                                 height=600,
                                 res=120) as b:
    p.plot()
display(Image(data=b.getvalue(), format='png', embed=True))