-
Notifications
You must be signed in to change notification settings - Fork 0
/
RestaurantRevenue.py
64 lines (48 loc) · 1.98 KB
/
RestaurantRevenue.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
import cPickle
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.feature_extraction import DictVectorizer
from datetime import datetime
#Load training data
class PredictRevenue:
    """Train a RandomForest revenue model and write test-set predictions.

    On construction this reads 'train.csv' and 'test.csv' from the current
    directory, one-hot encodes the categorical columns, fits a
    RandomForestRegressor on the P1..P37 numeric features plus the encoded
    categoricals, pickles the fitted model to 'model', and writes the
    predictions for the test set to 'RandomForest.csv' (using 'ssub.csv'
    as the submission template).

    NOTE(review): the constructor parameters (Type, Date, ZipCode,
    COuntry [sic], ParkingSpace) are accepted for interface compatibility
    but are never used by the pipeline below — confirm with callers
    whether they were meant to drive a per-restaurant prediction.
    """

    def __init__(self, Type, Date, ZipCode, COuntry, ParkingSpace):
        # Local import: the file-level `import cPickle` is Python 2 only;
        # `pickle` is the Python 3 equivalent (and an alias in Py2 works too).
        import pickle

        # Load training and test data (hard-coded paths in the CWD).
        train = pd.read_csv('train.csv')
        test = pd.read_csv('test.csv')
        vec = DictVectorizer()

        # Convert 'Open Date' (m/d/Y string) to "days open before 2015-01-01".
        train['Open Date'] = train['Open Date'].apply(self._days_before_2015)
        test['Open Date'] = test['Open Date'].apply(self._days_before_2015)

        # One-hot encode the categorical columns. Fit on train and only
        # transform test so both matrices share the same column layout.
        categorical = ['City', 'City Group', 'Type']
        train_new = vec.fit_transform(
            train[categorical].T.to_dict().values()).todense()
        test_new = vec.transform(
            test[categorical].T.to_dict().values()).todense()
        print(train_new)
        print(test_new)

        # Separate the regression target before assembling features.
        target = train['revenue']
        train = train.drop('revenue', axis=1)

        # Numeric feature columns P1..P37.
        p = ['P' + str(i) for i in range(1, 38)]
        train_p = train[p]
        test_p = test[p]

        # Final design matrices: encoded categoricals + P columns.
        train = np.hstack((train_new, train_p))
        test = np.hstack((test_new, test_p))

        # Fit the random forest and persist the fitted model to disk.
        clf = RandomForestRegressor(n_estimators=200)
        clf.fit(train, target)
        with open('model', 'wb') as f:
            pickle.dump(clf, f)
        print(clf.feature_importances_)

        # Predict revenue for the test set and write the submission file.
        test_revenue = clf.predict(test)
        print('test_revenue' + str(test_revenue))
        sub = pd.read_csv('ssub.csv')
        sub['Prediction'] = test_revenue
        sub.to_csv('RandomForest.csv', index=False)

    @staticmethod
    def _days_before_2015(date_x):
        """Return the number of days from *date_x* ("%m/%d/%Y" string) to
        2015-01-01 (negative if the date is after 2015-01-01)."""
        date_format = "%m/%d/%Y"
        opened = datetime.strptime(date_x, date_format)
        reference = datetime.strptime('01/01/2015', date_format)
        return (reference - opened).days
'''
A random forest is a meta estimator that fits a number of decision tree
classifiers on various sub-samples of the dataset and uses averaging to
improve the predictive accuracy and control over-fitting. The sub-sample
size is always the same as the original input sample size, but the samples
are drawn with replacement if bootstrap=True (the default).
'''