# Abc

### Abc

CODE:
# -*- coding: utf-8 -*-
"""
Spyder Editor
This temporary script file is located here: /home/likewise-open/VITUNIVERSITY/16bce2219/.spyder2/.temp.py

PageRank by power iteration: repeatedly multiply the transposed link
matrix by the rank vector.
"""
import numpy as np


def page_rank(link_matrix, n_iter=15, damping=0.85):
    """Return the rank vector after ``n_iter`` power-iteration steps.

    Parameters
    ----------
    link_matrix : (N, N) ndarray
        Row-stochastic link/transition matrix (row i holds the outgoing
        probabilities of page i).
    n_iter : int
        Number of multiplication steps to perform (default 15).
    damping : float
        Initial value for every entry of the rank vector (default 0.85).

    Returns
    -------
    (N, 1) ndarray with the rank estimate after ``n_iter`` iterations.
    """
    # Start every page with the same initial score.
    ranks = np.array([[damping]] * link_matrix.shape[1])
    # Transpose once, outside the loop: column j of `link_matrix` holds
    # the in-links of page j, which is what each update needs.
    transposed = link_matrix.T
    for _ in range(n_iter):
        ranks = np.matmul(transposed, ranks)
    return ranks


a = np.array([[0,0,0.5,0,0,0,0,0.5,0,0],[0.5,0,0,0,0,0,0.5,0,0,0],[0,0,0,1,0,0,0,0,0,0], [0,0,0,0,0,1,0,0,0,0],[0.5,0,0,0,0,0,0,0,0.5,0],[0,0,0,0,0,0,0,0,0,1],[0,0,0,0,0,0,0,0,1,0], [0,0,0,0,0,1,0,0,0,0],[0,0,0,0,0,0,0,1,0,0],[0,0,1,0,0,0,0,0,0,0]])
n = 15
d = 0.85
d_matrix = page_rank(a, n_iter=n, damping=d)
print('Page Ranks for matrix of size ' + str(a.shape[0]))
print(d_matrix)

1. Implement and visualize the k-means clustering algorithm, and demonstrate the clustering process on a real-world dataset.
from copy import deepcopy
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt

plt.rcParams['figure.figsize'] = (16, 9)
plt.style.use('ggplot')

# Importing the dataset (xclara: two numeric columns V1, V2).
data = pd.read_csv('xclara.csv')
print("Input Data and Shape")
print(data.shape)

# Getting the values and plotting it
f1 = data['V1'].values
f2 = data['V2'].values
X = np.array(list(zip(f1, f2)))
plt.scatter(f1, f2, c='black', s=7)


def dist(a, b, ax=1):
    """Euclidean distance between `a` and `b` along axis `ax`.

    ax=1 gives per-row distances; ax=None collapses everything to a
    single scalar (used below to measure total centroid movement).
    """
    return np.linalg.norm(a - b, axis=ax)


# Number of clusters
k = 3
# X and Y coordinates of random initial centroids, kept inside the data range.
C_x = np.random.randint(0, np.max(X) - 20, size=k)
C_y = np.random.randint(0, np.max(X) - 20, size=k)
C = np.array(list(zip(C_x, C_y)), dtype=np.float32)
print("Initial Centroids")
print(C)

# Plotting the data along with the initial centroids.
plt.scatter(f1, f2, c='#050505', s=7)
plt.scatter(C_x, C_y, marker='*', s=200, c='g')

# To store the centroid values from the previous iteration.
C_old = np.zeros(C.shape)
# Cluster labels (0, 1, 2) for each point.
clusters = np.zeros(len(X))
# Error = total distance between new centroids and old centroids.
error = dist(C, C_old, None)
# Loop runs until the centroids stop moving (error becomes zero).
while error != 0:
    # Assign each point to its closest centroid.
    for i in range(len(X)):
        distances = dist(X[i], C)
        cluster = np.argmin(distances)
        clusters[i] = cluster
    # Remember the old centroids before recomputing.
    C_old = deepcopy(C)
    # New centroid = mean of the points assigned to it.
    # NOTE(review): an empty cluster makes np.mean return nan — the
    # original code has the same behavior; confirm k/initialization.
    for i in range(k):
        points = [X[j] for j in range(len(X)) if clusters[j] == i]
        C[i] = np.mean(points, axis=0)
    error = dist(C, C_old, None)

# Plot the final clusters, one color each, with centroids as stars.
colors = ['r', 'g', 'b', 'y', 'c', 'm']
fig, ax = plt.subplots()
for i in range(k):
    points = np.array([X[j] for j in range(len(X)) if clusters[j] == i])
    ax.scatter(points[:, 0], points[:, 1], s=7, c=colors[i])
ax.scatter(C[:, 0], C[:, 1], marker='*', s=200, c='#050505')

from sklearn.cluster import KMeans

# Number of clusters
kmeans = KMeans(n_clusters=3)
# Fitting the input data
kmeans = kmeans.fit(X)
# Getting the cluster labels
labels = kmeans.predict(X)
# Centroid values
centroids = kmeans.cluster_centers_
# Comparing the from-scratch centroids with scikit-learn's.
print("Centroid values")
print("Scratch")
print(C)  # From Scratch
print("sklearn")
print(centroids)  # From sci-kit learn

import numpy as np

# Ten 2-D points: two visually separable groups.
X = np.array([[5, 3],
              [10, 15], [15, 12], [24, 10], [30, 30],
              [85, 70], [71, 80], [60, 78], [70, 55], [80, 91]])

labels = range(1, 11)

# Scatter the points and annotate each with its 1-based index.
plt.figure(figsize=(10, 7))
plt.subplots_adjust(bottom=0.1)
plt.scatter(X[:, 0], X[:, 1], label='True Position')
for label, x, y in zip(labels, X[:, 0], X[:, 1]):
    plt.annotate(
        label,
        xy=(x, y), xytext=(-3, 3),
        textcoords='offset points', ha='right', va='bottom')
plt.show()

from scipy.cluster.hierarchy import dendrogram, linkage
from matplotlib import pyplot as plt

labelList = range(1, 11)
# NOTE(review): the paste lost the linkage/dendrogram call that these
# keyword arguments belong to; reconstructed in the standard tutorial
# form — confirm the linkage method ('single') against the original.
linked = linkage(X, 'single')
plt.figure(figsize=(10, 7))
dendrogram(linked,
           orientation='top',
           labels=labelList,
           distance_sort='descending',
           show_leaf_counts=True)
plt.show()

import numpy as np
import pandas as pd

# NOTE(review): the paste lost the lines that load `movie_data` (a
# ratings DataFrame with 'userId', 'title' and 'rating' columns) and
# build `Jumanji_ratings` (the ratings column for the movie Jumanji);
# both must be defined before this point — restore from the original.

# Mean rating and number of ratings per movie title.
ratings_mean_count = pd.DataFrame(movie_data.groupby('title')['rating'].mean())
ratings_mean_count['rating_counts'] = pd.DataFrame(
    movie_data.groupby('title')['rating'].count())

# User x movie matrix of ratings (NaN where a user did not rate a movie).
user_movie_rating = movie_data.pivot_table(index='userId', columns='title',
                                           values='rating')

# Correlate every movie's rating column with Jumanji's to find movies
# liked by the same users.
movies_like_Jumanji = user_movie_rating.corrwith(Jumanji_ratings)
corr_Jumanji = pd.DataFrame(movies_like_Jumanji, columns=['Correlation'])
corr_Jumanji.dropna(inplace=True)

3. a) Implement Association Rule Mining using Apriori to find frequent patterns. DATASET: FRENCH STORE DATA. CODE:
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from apyori import apriori

# NOTE(review): the paste lost the line that loads `data` — typically
# data = pd.read_csv('store_data.csv', header=None); restore it.

# Convert the first 200 transactions (20 item columns each) into
# lists of item strings, the input format apyori expects.
rec = []
for i in range(0, 200):
    rec.append([str(data.values[i, j]) for j in range(0, 20)])

# Mine rules with at least 3% support, 20% confidence and lift >= 3.
association_rules = apriori(rec, min_support=0.03, min_confidence=0.2,
                            min_lift=3, min_length=2)
association_results = list(association_rules)

# Print each discovered rule with a running record number.
cnt = 1
for i in association_results:
    print("\nRECORD", cnt, ":\n", i)
    cnt += 1

3. b) Implement Association Rule Mining using FP-Growth to find frequent patterns. CODE:
import pyfpgrowth  transactions = [[1, 2, 5],[2, 4],[2, 3],[1, 2, 4],[1, 3], [2, 3],[1, 3],[1, 2, 3, 5], [1, 2, 3]]  patterns = pyfpgrowth.find_frequent_patterns(transactions, 2)  rules = pyfpgrowth.generate_association_rules(patterns, 0.7)  print(patterns)  print(rules)
sin31423

Posts: 2
Joined: Mon Oct 29, 2018 3:09 am
Reputation: 0