# Import Libraries
from osgeo import gdal
import numpy as np
import scipy
from scipy import ndimage as ndi
from matplotlib import pyplot as plt
from matplotlib import colors
from skimage import exposure
from skimage import feature
from skimage.segmentation import quickshift, felzenszwalb
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier
from skimage.color import rgb2gray
from skimage import data
import os
import math
import cv2
# Set Data Folder
data_folder = os.path.abspath("data")
# Set Output Folder
output_folder = os.path.abspath("output")
if not os.path.exists(output_folder):
    os.makedirs(output_folder)
amaravati_data_folder = os.path.join(data_folder, 'RGB', 'amaravati')
amaravati_files = os.listdir(amaravati_data_folder)
amaravati_files[0]
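# (Hedged sketch) The folder listing above may contain non-raster files; a simple
# extension filter keeps only GeoTIFFs. `tif_files` is an illustrative name, not
# part of the original workflow.
tif_files = [f for f in amaravati_files if f.lower().endswith(('.tif', '.tiff'))]
tif_files[:5]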
# Test File
testfile = os.path.join(data_folder, 'RGB', 'test', 'a.tif')
raster_dataset = gdal.Open(testfile, gdal.GA_ReadOnly)
geo_transform = raster_dataset.GetGeoTransform()
proj = raster_dataset.GetProjectionRef()
n_bands = raster_dataset.RasterCount
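# (Hedged sketch) A quick sanity check on the raster before reading its bands:
# size, band count, geotransform, and projection. Purely illustrative output;
# RasterXSize / RasterYSize are standard GDAL dataset attributes.
print("Size: {} x {} pixels, {} bands".format(raster_dataset.RasterXSize,
                                              raster_dataset.RasterYSize,
                                              n_bands))
print("GeoTransform:", geo_transform)
print("Projection:", proj)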
bands_data = []
for b in range(1, n_bands + 1):
    band = raster_dataset.GetRasterBand(b)
    bands_data.append(band.ReadAsArray())
bands_data = np.dstack(bands_data)
# Convert Image to RGB
image = np.dstack([bands_data[:, :, 0], bands_data[:, :, 1], bands_data[:, :, 2]])
image = exposure.rescale_intensity(image, out_range='uint8')
image = exposure.equalize_hist(image)
#image = exposure.equalize_adapthist(image)
#image = exposure.adjust_sigmoid(image)
rgb_img = image
# Show Imagery
plt.figure(num=None, figsize=(4, 4), dpi=96, facecolor='w', edgecolor='k')
plt.imshow(rgb_img)
plt.figure(num=None, figsize=(18, 18), dpi=96, facecolor='w', edgecolor='k')
plt.imshow(rgb_img)
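# (Hedged sketch) The output folder created earlier is not used elsewhere in this
# section; one plausible use is saving the stretched RGB preview to it.
# 'rgb_preview.png' is an assumed file name.
plt.imsave(os.path.join(output_folder, 'rgb_preview.png'), rgb_img)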
# Processing
def chunk_it(image, tile_size):
    """Split an image into a grid of roughly tile_size x tile_size tiles."""
    s = image.shape
    no_rows = math.ceil(s[0] / tile_size)
    no_cols = math.ceil(s[1] / tile_size)
    r = np.array_split(image, no_rows)
    rows = []
    for x in r:
        x = np.array_split(x, no_cols, axis=1)
        rows.append(x)
    return rows
# Chunk the imagery into tiles
chunked = chunk_it(rgb_img, 256)
fig, ax = plt.subplots(nrows=len(chunked), ncols=len(chunked[0]), figsize=(12, 12))
for r, row in enumerate(chunked):
    for c, col in enumerate(row):
        ax[r][c].imshow(col)
        ax[r][c].axis('off')
# Inspect a single tile
plt.figure(num=None, figsize=(4, 4), dpi=96, facecolor='w', edgecolor='k')
plt.imshow(chunked[5][5])
# Segmentation
segments_quick = quickshift(chunked[5][5], kernel_size=7, max_dist=10, ratio=0.35, convert2lab=False)  # segment the sample tile shown above
n_segments = len(np.unique(segments_quick))
print(n_segments)
cmap = colors.ListedColormap(np.random.rand(n_segments, 3))
plt.figure()
plt.imshow(segments_quick, interpolation='none', cmap=cmap)
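# (Hedged sketch) felzenszwalb is imported above but never called; for comparison,
# the same tile can be segmented with it. The scale/sigma/min_size values are
# illustrative defaults, not tuned parameters.
segments_fz = felzenszwalb(chunked[5][5], scale=100, sigma=0.5, min_size=50)
n_segments_fz = len(np.unique(segments_fz))
print(n_segments_fz)
plt.figure()
plt.imshow(segments_fz, interpolation='none',
           cmap=colors.ListedColormap(np.random.rand(n_segments_fz, 3)))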
# Threshold
img = (chunked[5][5]*255).astype(np.uint8)
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)  # bands were stacked in RGB order, not BGR
ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
plt.imshow(thresh, cmap=plt.cm.gray)
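# (Hedged sketch) scipy.ndimage is imported as ndi but unused; connected-component
# labelling gives a rough count of the blobs in the Otsu mask. Purely illustrative.
labels, num_blobs = ndi.label(thresh > 0)
print("Connected components in threshold mask:", num_blobs)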
# Grayscale
grayscale = rgb2gray(img)
plt.imshow(grayscale, cmap=plt.cm.gray)
# Feature Detection
detector = feature.CENSURE()
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))
detector.detect(grayscale)
ax[0].imshow(grayscale, cmap=plt.cm.gray)
ax[0].scatter(detector.keypoints[:, 1], detector.keypoints[:, 0],
              2 ** detector.scales, facecolors='none', edgecolors='r')
ax[0].set_title("Grayscale Image used for analysis")
ax[1].imshow(img)
ax[1].scatter(detector.keypoints[:, 1], detector.keypoints[:, 0],
              2 ** detector.scales, facecolors='none', edgecolors='r')
ax[1].set_title("Original Image")
plt.show()
# NIR Imagery (assumes the raster has at least four bands, with NIR as band 4)
nir_img = bands_data[:, :, 3]
plt.imshow(nir_img, cmap=plt.cm.gray)
# False colour composite
image = np.dstack([bands_data[:, :, 3], bands_data[:, :, 0], bands_data[:, :, 1]])
image = exposure.rescale_intensity(image, out_range='uint8')
#image = exposure.equalize_hist(image)
#image = exposure.equalize_adapthist(image)
#image = exposure.adjust_sigmoid(image)
fcc_img = image
# Plot False Colour Composite
plt.imshow(fcc_img)
plt.figure(num=None, figsize=(18, 18), dpi=96, facecolor='w', edgecolor='k')
plt.imshow(fcc_img)
# Chunk the imagery into tiles
chunked = chunk_it(fcc_img, 256)
fig, ax = plt.subplots(nrows=len(chunked), ncols=len(chunked[0]), figsize=(12, 12))
for r, row in enumerate(chunked):
    for c, col in enumerate(row):
        ax[r][c].imshow(col)
        ax[r][c].axis('off')
# Inspect a single tile
fcc_sample = chunked[5][5]
plt.figure(num=None, figsize=(4, 4), dpi=96, facecolor='w', edgecolor='k')
plt.imshow(fcc_sample)
# Threshold
img = fcc_sample.astype(np.uint8)
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)  # treat the FCC stack as RGB for the grayscale conversion
ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
plt.imshow(thresh, cmap=plt.cm.gray)
# Grayscale
grayscale = rgb2gray(img)
plt.imshow(grayscale, cmap=plt.cm.gray)
# Feature Detection
detector = feature.CENSURE()
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))
detector.detect(grayscale)
ax[0].imshow(grayscale, cmap=plt.cm.gray)
ax[0].scatter(detector.keypoints[:, 1], detector.keypoints[:, 0],
              2 ** detector.scales, facecolors='none', edgecolors='r')
ax[0].set_title("Grayscale Image used for analysis")
ax[1].imshow(img)
ax[1].scatter(detector.keypoints[:, 1], detector.keypoints[:, 0],
              2 ** detector.scales, facecolors='none', edgecolors='r')
ax[1].set_title("Original Image")
plt.show()
nir = fcc_sample[:, :, 0]
nir = exposure.rescale_intensity(nir, out_range='uint8').astype(float)
red = fcc_sample[:, :, 1]
red = exposure.rescale_intensity(red, out_range='uint8').astype(float)
check = np.logical_and(red > 1, nir > 1)
# NDVI (float arithmetic avoids uint8 wrap-around in the subtraction)
ndvi = np.where(check, (nir - red) / (nir + red), -999)
ndvi2 = ((np.clip(ndvi, -1, 1) + 1) * 127.5).astype(np.uint8)  # map [-1, 1] to [0, 255]
fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(12, 6))
ax[0].imshow(img)
ax[0].set_title("Original Image")
ax[1].imshow(ndvi)
ax[1].set_title("NDVI")
ax[2].imshow(ndvi2)
ax[2].set_title("NDVI (uint8)")
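# (Hedged sketch) A simple vegetation mask can be derived by thresholding NDVI.
# The 0.2 cut-off is an assumed, illustrative value, not calibrated for this scene.
veg_mask = ndvi > 0.2
plt.figure(figsize=(4, 4))
plt.imshow(veg_mask, cmap=plt.cm.gray)
plt.title("Vegetation mask (NDVI > 0.2, illustrative threshold)")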
# Water Body Extraction
nir = fcc_sample[:, :, 0].astype(float)
red = fcc_sample[:, :, 1].astype(float)
check = np.logical_and(red > 0, nir > 0)
# NDVI (cast to float so the uint8 subtraction does not wrap around)
ndvi = np.where(check, (nir - red) / (nir + red), -999)
#ndvi = exposure.rescale_intensity(ndvi, out_range='uint8')
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))
ax[0].imshow(img)
ax[0].set_title("Original Image")
ax[1].imshow(ndvi)
ax[1].set_title("NDVI")
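# (Hedged sketch) Water typically has negative NDVI, so a first-pass water mask
# can threshold below zero while excluding the -999 no-data fill. The cut-off is
# an assumed, illustrative value, not calibrated for this scene.
water_mask = np.logical_and(ndvi > -1, ndvi < 0)
plt.figure(figsize=(4, 4))
plt.imshow(water_mask, cmap=plt.cm.gray)
plt.title("Water mask (NDVI < 0, illustrative threshold)")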