Bubble Sort
def bubble_sort(lst):
    """Sort lst in place in ascending order using bubble sort.

    Time O(n^2) worst/average, O(n) best (already sorted, thanks to the
    early-exit flag); space O(1).
    """
    # After each outer pass the largest remaining element has bubbled to
    # index n, so the inner scan can shrink by one each time.
    for n in range(len(lst) - 1, 0, -1):
        swapped = False
        for j in range(n):
            if lst[j] > lst[j + 1]:
                lst[j], lst[j + 1] = lst[j + 1], lst[j]
                swapped = True
        if not swapped:  # no swaps -> already sorted, stop early
            return
Selection Sort
def selection_sort(lst):
    """Sort lst in place in ascending order using selection sort.

    Time O(n^2) (the scan for the max runs for every position — the
    original note claiming O(n) was wrong); space O(1).
    """
    # Walk the boundary from the right: find the max of lst[0..i] and
    # swap it into slot i, which is then in its final position.
    for i in range(len(lst) - 1, 0, -1):
        max_index = i
        for j in range(i):
            if lst[j] > lst[max_index]:
                max_index = j
        lst[i], lst[max_index] = lst[max_index], lst[i]
Insertion Sort
search: O(n) with a linear scan, or O(log n) with binary search over the sorted prefix
insert: O(n) either way — shifting elements dominates, so the overall sort stays O(n^2)
time O(n^2) space O(1)
def insertion_sort(lst):
    """Sort lst in place in ascending order using insertion sort.

    Time O(n^2) worst/average, O(n) best (already sorted); space O(1).
    """
    for i in range(1, len(lst)):
        cur = lst[i]
        j = i
        # Shift elements of the sorted prefix right until cur's slot opens.
        while j > 0 and cur < lst[j - 1]:
            lst[j] = lst[j - 1]
            j -= 1
        lst[j] = cur  # was a typo ("lsit") in the original
Aside — naive recursive Fibonacci: time O(2^n) (each call spawns two more), space O(n) (max recursion depth).
Merge Sort
#time O(nlogn): log n levels of halving, with O(n) merge work per level
#space O(n + logn) = O(n)
def merge(a, b):
    """Merge two sorted lists a and b into a new sorted list.

    Takes from a on ties (<=) so a surrounding merge sort stays stable.
    Time O(len(a) + len(b)); space O(len(a) + len(b)) for the output.
    """
    c = []
    index_a = index_b = 0
    while index_a < len(a) and index_b < len(b):
        if a[index_a] <= b[index_b]:
            c.append(a[index_a])
            index_a += 1
        else:
            c.append(b[index_b])
            index_b += 1
    # At most one of these slices is non-empty; extending with an empty
    # slice is a no-op, so no guard is needed.
    c.extend(a[index_a:])
    c.extend(b[index_b:])
    return c
def merge_sort(x):
    """Return a new list with the elements of x in ascending order.

    Time O(n log n); space O(n) for the slices plus O(log n) stack.
    """
    if len(x) <= 1:  # base case: empty or single element is sorted
        return x
    # // (floor division): len(x) / 2 is a float in Python 3 and cannot
    # be used as a slice index.
    middle = len(x) // 2
    a = merge_sort(x[:middle])
    b = merge_sort(x[middle:])
    return merge(a, b)
Quick Sort
#time: average/best O(n log n); worst O(n^2) (all pivots degenerate)
#space: average/best O(log n); worst O(n) (recursion depth)
from random import randrange
def partition(lst, start, end, pivot_index):
    """Lomuto-style partition of lst[start..end] around lst[pivot_index].

    Rearranges the range in place so elements < pivot precede it and the
    rest follow; returns the pivot's final index. Time O(end - start).
    """
    # Stash the pivot at the end, out of the way of the scan.
    lst[pivot_index], lst[end] = lst[end], lst[pivot_index]
    store_index = start
    pivot = lst[end]
    # range(), not Python 2's xrange(); also fixed the original's
    # "list[i]" typos, which indexed the builtin type.
    for i in range(start, end):
        if lst[i] < pivot:
            lst[i], lst[store_index] = lst[store_index], lst[i]
            store_index += 1
    # Drop the pivot into its final sorted position.
    lst[store_index], lst[end] = lst[end], lst[store_index]
    return store_index
def quick_sort(lst, start, end):
    """Sort lst[start..end] in place with randomized quicksort.

    A random pivot makes the O(n^2) worst case vanishingly unlikely on
    any fixed input.
    """
    if start >= end:  # zero or one element: nothing to do
        return
    chosen = randrange(start, end + 1)
    split = partition(lst, start, end, chosen)
    # Pivot is final at `split`; recurse on the two remaining halves.
    quick_sort(lst, start, split - 1)
    quick_sort(lst, split + 1, end)
- Why is quicksort often preferred over merge sort?
- It has better space complexity: O(n) worst case, O(log n) on average (merge sort needs O(n) extra for the merge buffer).
- Its O(n^2) worst-case time occurs rarely, especially with a randomized pivot.
- It is cache-friendly because of its good locality of reference (it works within the same region of memory).
- Its second recursive call is in tail position, so that frame need not be kept on the stack.
Reader comments