Calculate overlapping areas between convex hulls

I am following the example here and have successfully created the convex hulls. But how can I calculate the shared areas between each convex hull in the following figure?
Thanks!

Here's an example to get the intersection of Manhattan and the Bronx. You could use pd.concat() before .overlay() if you want to combine boroughs (a sketch of that follows the plot below).
import geopandas as gpd
nybb_path = gpd.datasets.get_path('nybb')
boros = gpd.read_file(nybb_path)
boros.set_index('BoroCode', inplace=True)
boros.sort_index(inplace=True)
boros['geometry'] = boros['geometry'].convex_hull
print(boros)
               BoroName     Shape_Leng    Shape_Area                                           geometry
BoroCode
1             Manhattan  359299.096471  6.364715e+08  POLYGON ((977855.445 188082.322, 971830.134 19...
2                 Bronx  464392.991824  1.186925e+09  POLYGON ((1017949.978 225426.885, 1015563.562 ...
3              Brooklyn  741080.523166  1.937479e+09  POLYGON ((988872.821 146772.032, 983670.606 14...
4                Queens  896344.047763  3.045213e+09  POLYGON ((1000721.532 136681.776, 994611.996 2...
5         Staten Island  330470.010332  1.623820e+09  POLYGON ((915517.688 120121.881, 915467.035 12...
manhattan_gdf = boros.iloc[0:1, :]
bronx_gdf = boros.iloc[1:2, :]
manhattan_bronx_intersection_polygon = gpd.overlay(manhattan_gdf, bronx_gdf,
                                                   how='intersection')
# SPCS83 New York Long Island zone (US Survey feet)
print(manhattan_bronx_intersection_polygon.geometry[0].area)
164559574.89341027
ax = manhattan_bronx_intersection_polygon.plot(figsize=(6, 6))
boros.plot(ax=ax, facecolor='none', edgecolor='k');
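As noted above, if you want to combine boroughs on one side of the overlay, a pd.concat() sketch along these lines should work (reusing the GeoDataFrames defined above; Brooklyn is picked arbitrarily as the other side):
import pandas as pd
# Combine Manhattan and the Bronx into one GeoDataFrame, then intersect with Brooklyn.
manhattan_bronx_gdf = pd.concat([manhattan_gdf, bronx_gdf])
brooklyn_gdf = boros.iloc[2:3, :]
combined_intersection = gpd.overlay(manhattan_bronx_gdf, brooklyn_gdf, how='intersection')
print(combined_intersection.geometry.area.sum())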
Here is a loop-based solution, as requested in your comment.
import geopandas as gpd
nybb_path = gpd.datasets.get_path('nybb')
boros = gpd.read_file(nybb_path)
boros.set_index('BoroCode', inplace=True)
boros.sort_index(inplace=True)
boros['geometry'] = boros['geometry'].convex_hull
intersection_polygons_list = []
for idx, row in boros.iterrows():
    main_boro_gdf = boros.iloc[idx-1:idx, :]
    print('\n' + 'main boro:', main_boro_gdf['BoroName'].values.tolist()[:])
    other_boro_list = boros.index.tolist()
    other_boro_list.remove(idx)
    other_boro_gdf = boros[boros.index.isin(other_boro_list)]
    print('other boros:', other_boro_gdf['BoroName'].values.tolist()[:])
    intersection_polygons = gpd.overlay(main_boro_gdf, other_boro_gdf, how='intersection')
    intersection_polygons['intersection_area'] = intersection_polygons.geometry.area
    print('intersection area sum:', intersection_polygons['intersection_area'].sum())
    intersection_polygons_list.append(intersection_polygons)
output:
main boro: ['Manhattan']
other boros: ['Bronx', 'Brooklyn', 'Queens', 'Staten Island']
intersection area sum: 279710750.6116526
main boro: ['Bronx']
other boros: ['Manhattan', 'Brooklyn', 'Queens', 'Staten Island']
intersection area sum: 216638786.2669542
main boro: ['Brooklyn']
other boros: ['Manhattan', 'Bronx', 'Queens', 'Staten Island']
intersection area sum: 1506573115.3550038
main boro: ['Queens']
other boros: ['Manhattan', 'Bronx', 'Brooklyn', 'Staten Island']
intersection area sum: 1560297426.3563197
main boro: ['Staten Island']
other boros: ['Manhattan', 'Bronx', 'Brooklyn', 'Queens']
intersection area sum: 0.0
You can plot using the intersection_polygons_list index values. For example, here are the overlapping areas for the Bronx:
intersection_polygons_list[1].plot()
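To see all the overlaps at once (not in the original answer), you could concatenate the list and plot it over the hulls, for example:
import pandas as pd
all_overlaps = pd.concat(intersection_polygons_list, ignore_index=True)
ax = all_overlaps.plot(figsize=(6, 6), alpha=0.5)
boros.plot(ax=ax, facecolor='none', edgecolor='k');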

Related

Facebook DETR resnet 50 in HuggingFace Hub

Trying the pretrained Facebook DETR model for object detection using the HuggingFace implementation.
The sample code listed below, from https://huggingface.co/facebook/detr-resnet-50, is straightforward.
from transformers import DetrFeatureExtractor, DetrForObjectDetection
from PIL import Image
import requests
import numpy as np
url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
image = Image.open(requests.get(url, stream=True).raw)
feature_extractor = DetrFeatureExtractor.from_pretrained('facebook/detr-resnet-50')
model = DetrForObjectDetection.from_pretrained('facebook/detr-resnet-50')
inputs = feature_extractor(images=image, return_tensors="pt")
outputs = model(**inputs)
# model predicts bounding boxes and corresponding COCO classes
logits = outputs.logits
bboxes = outputs.pred_boxes
I can use
threshold = 0.7
labels =['background', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
'street sign', 'stop sign', 'parking meter', 'bench', 'bird',
'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra',
'giraffe', 'hat', 'backpack', 'umbrella', 'shoe', 'eye glasses',
'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
'sports ball', 'kite', 'baseball bat', 'baseball glove',
'skateboard', 'surfboard', 'tennis racket', 'bottle', 'plate',
'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana',
'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog',
'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
'mirror', 'dining table', 'window', 'desk', 'toilet', 'door', 'tv',
'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
'oven', 'toaster', 'sink', 'refrigerator', 'blender', 'book',
'clock', 'vase', 'scissors', 'teddy bear', 'hair drier',
'toothbrush']
np_softmax = (logits.softmax(-1)[0, :, :-1]).detach().numpy()
classes = []
probability = []
idx = []
for i, j in enumerate(np_softmax):
    if np.max(j) > threshold:
        classes.append(labels[np.argmax(j)])
        probability.append(np.max(j))
        idx.append(i)
to retrieve the detected classes. But I did not fully understand the coordinates in bboxes.
This is a torch tensor with 100 bounding-box coordinates of 4 dimensions each. With idx I can get the indices of the detected classes, so I can get their corresponding boxes. It seems the coordinates are normalized, because they are all between 0 and 1. I have difficulty remapping the coordinates to pixels so I can draw the bounding boxes on the original image. I could not find documentation on this; any suggestions? Thanks
Okay, figured it out: the four coordinates are the normalized (X center, Y center, Width, Height).
If you want to draw a rectangle for each bbox, you can use this code:
import matplotlib.pyplot as plt

# pil_img is the original PIL image; bbox is one box already converted to
# pixel (xmin, ymin, xmax, ymax); c is the colour for this box.
plt.figure(figsize=(16, 10))
plt.imshow(pil_img)
ax = plt.gca()
(xmin, ymin, xmax, ymax) = bbox
ax.add_patch(plt.Rectangle(
    (xmin, ymin),
    xmax - xmin,
    ymax - ymin,
    fill=False,
    color=c,
    linewidth=3
))
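The snippet above assumes bbox is already in pixel (xmin, ymin, xmax, ymax) coordinates. Since pred_boxes holds normalized (x_center, y_center, width, height) values, a conversion roughly like this sketch is needed first (the variable names below are mine):
# Sketch: scale DETR's normalized (x_c, y_c, w, h) boxes by the image size
# to get pixel (xmin, ymin, xmax, ymax) boxes for the detections kept in `idx`.
img_w, img_h = image.size                       # PIL image from the question
boxes = outputs.pred_boxes[0].detach().numpy()  # shape (100, 4)
pixel_boxes = []
for i in idx:
    x_c, y_c, w, h = boxes[i]
    pixel_boxes.append((
        (x_c - w / 2) * img_w,   # xmin
        (y_c - h / 2) * img_h,   # ymin
        (x_c + w / 2) * img_w,   # xmax
        (y_c + h / 2) * img_h,   # ymax
    ))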

Numpy custom named datatype slice

I am creating a 3D sphere from a 2D grid of selected latitudes and longitudes, which give the cartesian coordinates. This grid represents the key points used to draw the 3D sphere. Then I create the X, Y, Z 3D coordinate values from these cartesian coordinates with the well-known formulas. The resulting sphere is shown in the attached picture. I am using a numpy named (structured) datatype,
np3d = np.dtype([('X', np.float), ('Y', np.float), ('Z', np.float)])
for the 3D coordinates and
np2d = np.dtype([('L', np.float), ('B', np.float)])
for the latitude/longitude grid. My Python code is:
import numpy as np, math
import pandas as pd
from matplotlib import pyplot as plt, ticker, patches, font_manager as fmng
from matplotlib.widgets import Cursor, MultiCursor
from pathlib import Path
from datetime import datetime
fig3d = plt.figure('3D Sphere', figsize=(9.5,9.5))
fig3d.subplots_adjust(left=0.04, bottom=0.07, top=0.97, right=0.97, wspace=0, hspace=0)
prmgraf = dict(axis="both", direction='in',top=True, right=True)
ax3 = fig3d.add_subplot(111, projection='3d')
ax3.grid(False)
ax3.minorticks_on()
ax3.yaxis.set_minor_locator(ticker.AutoMinorLocator(5))
ax3.xaxis.set_minor_locator(ticker.AutoMinorLocator(5))
ax3.zaxis.set_minor_locator(ticker.AutoMinorLocator(5))
ax3.tick_params(which='major', length=4, **prmgraf)
ax3.tick_params(which='minor', length=3, **prmgraf)
ax3.set_xlabel('Axis X')
ax3.set_ylabel('Axis Y')
ax3.set_zlabel('Axis Z')
ax3.set_xlim(-15.0, 15.0)
ax3.set_ylim(-15.0, 15.0)
ax3.set_zlim(-15.0, 15.0)
# azim elev
ax3.view_init(0., 180.)
# ---------------------------- settings ------------------------------
nLat = np.vstack(np.radians(np.arange(-65., 70., 5.)))
nLon = np.radians(np.arange(-180., 185., 5.))
np3d = np.dtype([('X', np.float), ('Y', np.float), ('Z', np.float)])
np2d = np.dtype([('L', np.float), ('B', np.float)])
LatN, LonN = (len(nLat), len(nLon))
SphrRadius = 14.5
#2D cartesian coordinates
pSphr = np.zeros(shape=(LatN, LonN), dtype=np2d)
pSphr['L'] = nLat
pSphr['B'] = nLon
#3D sphere coordinates
Spher = np.zeros(shape=(LatN, LonN), dtype=np3d)
Spher['X'] = SphrRadius*np.cos(pSphr['L'])*np.sin(pSphr['B'])
Spher['Y'] = SphrRadius*np.sin(pSphr['L'])
Spher['Z'] = SphrRadius*np.cos(pSphr['L'])*np.cos(pSphr['B'])
# draw sphere latitudes
for i in range(LatN):
    kx = Spher[i, :]['X']
    ky = Spher[i, :]['Y']
    kz = Spher[i, :]['Z']
    ax3.plot3D(kx, ky, kz, c='k', lw=0.5)
plt.show()
The Z values of the sphere range between -15.0 and 15.0. I want to select ONLY POSITIVE Z VALUES in the Spher variable; in other words, I want to draw only the half of the sphere on the positive Z side. How can I do that with a named datatype? Thanks in advance to those who will answer.
I found a solution:
Spher[Spher['Z'] < 0.] = np.nan
But this is not the solution I expected. I want to select all the points into a new variable, like
ZPositive = Spher[Spher['Z'] >= 0.]
but in this way the 2D data structure collapses into 1D data.
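For reference (not in the original question), a minimal sketch of the difference on a small stand-in grid: boolean indexing on a structured array always returns a flat 1D array of the selected records, whereas masking the unwanted points with NaN in a copy preserves the 2D (LatN, LonN) grid, so a row-by-row plotting loop still works:
import numpy as np

np3d = np.dtype([('X', float), ('Y', float), ('Z', float)])
Spher = np.zeros(shape=(4, 6), dtype=np3d)   # small stand-in grid

# Boolean indexing: returns the selected records as a flat 1D array.
flat = Spher[Spher['Z'] >= 0.]               # shape (N,), grid structure lost

# NaN masking in a copy: keeps the 2D grid; matplotlib simply skips NaNs.
ZPositive = Spher.copy()
for field in ('X', 'Y', 'Z'):
    ZPositive[field] = np.where(Spher['Z'] >= 0., Spher[field], np.nan)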

Fast Radial Symmetry Transform (FRST) implementation (python) results in unusual cross-hair looking artifacts

I am trying to implement FRST in Python to detect the centroids of elliptical objects (e.g. cells in microscopy images), but my implementation does not find the seed points (more or less the center points) of the elliptical objects. This effort comes from duplicating FRST from Segmentation of Overlapping Elliptical Objects in Silhouette Images (https://ieeexplore.ieee.org/document/7300433). I don't know why I get these artifacts. An interesting thing is that I see these patterns (crosses) all in the same direction per object. Any pointer in the right direction to generate the same result as in the paper (just to find the seed points) would be most welcome.
Original Paper: A Fast Radial Symmetry Transform for Detecting Points of Interest by Loy and Zelinsky (ECCV 2002)
I have also tried the pre-existing python package for FRST: https://pypi.org/project/frst/. This somehow results in the same artifacts. Weird.
First image: Original Image
Second image: Sobel-operated Image
Third image: Magnitude Projection Image
Fourth image: Magnitude Projection Image with positively affected pixels only
Fifth image: FRST'd image: end-product with original image overlaid (shadowed)
Sixth image: FRST'd image by the pre-existing python package with original image overlaid (shadowed).
from scipy.ndimage import gaussian_filter
import numpy as np
from scipy.signal import convolve

# Get orientation projection image
def get_proj_img(image, radius):
    workingDims = tuple((e + 2*radius) for e in image.shape)
    h, w = image.shape
    ori_img = np.zeros(workingDims)  # Orientation Projection Image
    mag_img = np.zeros(workingDims)  # Magnitude Projection Image

    # Kernels for the Sobel operator
    a1 = np.matrix([1, 2, 1])
    a2 = np.matrix([-1, 0, 1])
    Kx = a1.T * a2
    Ky = a2.T * a1

    # Apply the Sobel operator
    sobel_x = convolve(image, Kx)
    sobel_y = convolve(image, Ky)
    sobel_norms = np.hypot(sobel_x, sobel_y)

    # Distances to afpx, afpy (affected pixels)
    dist_afpx = np.multiply(np.divide(sobel_x, sobel_norms, out=np.zeros(sobel_x.shape), where=sobel_norms != 0), radius)
    dist_afpx = np.round(dist_afpx).astype(int)
    dist_afpy = np.multiply(np.divide(sobel_y, sobel_norms, out=np.zeros(sobel_y.shape), where=sobel_norms != 0), radius)
    dist_afpy = np.round(dist_afpy).astype(int)

    for cords, sobel_norm in np.ndenumerate(sobel_norms):
        i, j = cords
        pos_aff_pix = (i + dist_afpx[i, j], j + dist_afpy[i, j])
        neg_aff_pix = (i - dist_afpx[i, j], j - dist_afpy[i, j])
        ori_img[pos_aff_pix] += 1
        ori_img[neg_aff_pix] -= 1
        mag_img[pos_aff_pix] += sobel_norm
        mag_img[neg_aff_pix] -= sobel_norm

    ori_img = ori_img[:h, :w]
    mag_img = mag_img[:h, :w]
    print("Did it go back to the original image size?")
    print(ori_img.shape == image.shape)
    # try normalizing ori and mag img
    return ori_img, mag_img

def get_sn(ori_img, mag_img, radius, kn, alpha):
    ori_img_limited = np.minimum(ori_img, kn)
    fn = np.multiply(np.divide(mag_img, kn), np.power((np.absolute(ori_img_limited)/kn), alpha))
    # convolve fn with a Gaussian filter
    sn = gaussian_filter(fn, 0.25*radius)
    return sn

def do_frst(image, radius, kn, alpha, ksize=3):
    ori_img, mag_img = get_proj_img(image, radius)
    sn = get_sn(ori_img, mag_img, radius, kn, alpha)
    return sn
Parameters:
radius = 50
kn = 10
alpha = 2
beta = 0
stdfactor = 0.25
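For completeness (not part of the original post), a minimal sketch of how the functions above might be wired together with these parameters; the input filename 'cells.png' and the final seed-point thresholding step are assumptions, and beta / stdfactor are listed above but not consumed by do_frst as written:
import numpy as np
from PIL import Image

# Load a grayscale microscopy image (hypothetical filename).
image = np.asarray(Image.open('cells.png').convert('L'), dtype=float)

radius = 50   # parameters from the post
kn = 10
alpha = 2

sn = do_frst(image, radius, kn, alpha)

# One possible (assumed) way to pick candidate seed points: strong responses of |sn|.
seeds = np.argwhere(np.abs(sn) > 0.5 * np.abs(sn).max())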

D3.js: How to accurately calculate the Mercator transformation ratio `r`?

Given a square ABCD on the Earth's surface (equirectangular), with A & B on the Greenwich meridian and C & D on the meridian at longitude 10°:
A( 0.0; 50.0) C(10.0; 50.0)
B( 0.0; 40.0) D(10.0; 40.0)
Given that my D3.js dataviz works in the d3.geo.mercator() projection, my square is vertically stretched by a ratio r = Mercator height in px / width in px of about 1.5.
How can I accurately calculate this Mercator transformation ratio r?
Note: this is non-linear, since it involves a 1/cos() term [2].
Edit: I am tempted to think we should first reproject each point onto the screen using d3.geo.mercator() (HOW? Which syntax?), so D3 does all the hard maths. We could then GET each point's pixel coordinates, compute the lengths AB and AC in pixels, and finally r = AB/AC. More generally, how do you convert decimal-degree coordinates into projected pixel coordinates for a chosen d3.geo.<PROJECTIONNAME>()?
[2]: Mercator: scale factor is changed along the meridians as a function of latitude?
I will assume that the points are A: (0, 50), B: (0, 40), C: (10, 50) and D: (10, 40). The feature enclosed by the points (A, C, D, B) will look like a square under the equirectangular projection. Since the points are longitude, latitude pairs, you can compute the great-arc distance between them using d3.geo.distance. This will give you the angular distance between the points. For instance:
// Points (lon, lat)
var A = [ 0, 50],
    B = [ 0, 40],
    C = [10, 50],
    D = [10, 40];

// Geographic distance between AB and AC
var distAB = d3.geo.distance(A, B), // 0.17453292519943306 radians
    distAC = d3.geo.distance(A, C); // 0.11210395570214343 radians
Now, these distances are the angles between the points; as you can see, the feature wasn't a square. If we project the points using the D3 Mercator projection:
// The map will fit in 800 px horizontally
var width = 800;
var mercator = d3.geo.mercator()
    .scale(width / (2 * Math.PI));

// Project (lon, lat) points using the projection, to get pixel coordinates.
var pA = mercator(A), // [480, 121] (rounded)
    pB = mercator(B), // [480, 152] (rounded)
    pC = mercator(C); // [502, 121] (rounded)
And now use the euclidean distance to compute the distance between the projected points pA, pB and pC.
function dist(p, q) {
    return Math.sqrt(Math.pow(p[0] - q[0], 2) + Math.pow(p[1] - q[1], 2));
}

var pDistAB = dist(pA, pB), // 31.54750649588999 pixels
    pDistAC = dist(pA, pC); // 22.22222222222223 pixels
If you use the angular distances as reference, you will get two ratios, one for AB and another for AC:
var ratioAB = distAB / pDistAB, // 0.005532384159178197 radians/pixel
    ratioAC = distAC / pDistAC; // 0.005044678006596453 radians/pixel
If you use the equirectangular projection as reference, you can use the euclidean distance between the points (as if they were on a plane surface):
var ratioAB = dist(A, B) / pDistAB, // 0.3169822629659431 degrees/pixel
    ratioAC = dist(A, C) / pDistAC; // 0.44999999999999984 degrees/pixel
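Not from the original answer, but the vertical stretch can also be cross-checked analytically with the Mercator formula y = ln(tan(π/4 + φ/2)): dividing the projected height of the square by its width (10° in radians) gives r ≈ 1.42, which matches pDistAB / pDistAC above. A minimal Python sketch:
import math

lat_top, lat_bottom = math.radians(50.0), math.radians(40.0)
lon_span = math.radians(10.0)

def mercator_y(lat):
    # Mercator ordinate on a unit-radius sphere
    return math.log(math.tan(math.pi / 4 + lat / 2))

height = mercator_y(lat_top) - mercator_y(lat_bottom)  # projected vertical extent
width = lon_span                                       # projected horizontal extent
print(height / width)  # ~1.42, same as 31.55 px / 22.22 px in the D3 computation above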

Algorithm to subdivide a polygon into smaller polygons

I have a polygon made of successive edges on a plane, and I would like to subdivide it into sub-polygons (triangles or rectangles).
Where can I find an algorithm to do this?
Thanks!
In computational geometry, the problem you want to solve is called triangulation.
There are algorithms to solve this problem, giving triangulations with different properties. You will need to decide which one is the best fit.
I was looking for an answer to this myself but couldn't find one, so I tried to stitch together several pieces and here's the result.
This is not necessarily the most optimal routine, but it did the job for me. If you want to increase performance, try experimenting with the code.
A brief description of the algorithm:
1. Using the boundaries of the original geometry itself, of its convex hull, and of its minimum rotated rectangle, derive all possible rectangles.
2. Divide all rectangles into smaller squares of the specified side length.
3. Drop duplicates using a rounded-off centroid (r: round-off parameter).
4. Retain either the squares 'within' the geometry or those that 'intersect' the geometry, depending on which count is closer to the total number of required squares.
EDITED
New Solution
#### Python script for dividing any shapely polygon into smaller equal sized polygons
import numpy as np
from shapely.ops import split
import geopandas
from shapely.geometry import MultiPolygon, Polygon, LineString  # LineString is needed below


def rhombus(square):
    """
    Naively transform the square into a Rhombus at a 45 degree angle
    """
    coords = square.boundary.coords.xy
    xx = list(coords[0])
    yy = list(coords[1])
    radians = 1
    points = list(zip(xx, yy))
    Rhombus = Polygon(
        [
            points[0],
            points[1],
            points[3],
            ((2 * points[3][0]) - points[2][0], (2 * points[3][1]) - points[2][1]),
            points[4],
        ]
    )
    return Rhombus


def get_squares_from_rect(RectangularPolygon, side_length=0.0025):
    """
    Divide a Rectangle (Shapely Polygon) into squares of equal area.

    `side_length` : required side of square
    """
    rect_coords = np.array(RectangularPolygon.boundary.coords.xy)
    y_list = rect_coords[1]
    x_list = rect_coords[0]
    y1 = min(y_list)
    y2 = max(y_list)
    x1 = min(x_list)
    x2 = max(x_list)
    width = x2 - x1
    height = y2 - y1

    xcells = int(np.round(width / side_length))
    ycells = int(np.round(height / side_length))

    yindices = np.linspace(y1, y2, ycells + 1)
    xindices = np.linspace(x1, x2, xcells + 1)
    horizontal_splitters = [
        LineString([(x, yindices[0]), (x, yindices[-1])]) for x in xindices
    ]
    vertical_splitters = [
        LineString([(xindices[0], y), (xindices[-1], y)]) for y in yindices
    ]
    result = RectangularPolygon
    for splitter in vertical_splitters:
        result = MultiPolygon(split(result, splitter))
    for splitter in horizontal_splitters:
        result = MultiPolygon(split(result, splitter))
    square_polygons = list(result)

    return square_polygons


def split_polygon(G, side_length=0.025, shape="square", thresh=0.9):
    """
    Using a rectangular envelope around `G`, creates a mesh of squares of required length.

    Removes non-intersecting polygons.

    Args:
    - `thresh` : Range - [0,1]
        This controls the number of smaller polygons at the boundaries.
        A thresh == 1 will only create (or retain) smaller polygons that are
        completely enclosed (area of intersection = area of smaller polygon)
        by the original Geometry - `G`.
        A thresh == 0 will create (or retain) smaller polygons that
        have a non-zero intersection (area of intersection > 0) with the
        original geometry - `G`
    - `side_length` : Range - (0, infinity)
        side_length must be such that the resultant geometries are smaller
        than the original geometry - `G`, for a useful result.
        side_length should be > 0 (non-zero positive)
    - `shape` : {square/rhombus}
        Desired shape of subset geometries.
    """
    assert side_length > 0, "side_length must be a float > 0"
    Rectangle = G.envelope
    squares = get_squares_from_rect(Rectangle, side_length=side_length)
    SquareGeoDF = geopandas.GeoDataFrame(squares).rename(columns={0: "geometry"})
    Geoms = SquareGeoDF[SquareGeoDF.intersects(G)].geometry.values
    if shape == "rhombus":
        Geoms = [rhombus(g) for g in Geoms]
        geoms = [g for g in Geoms if ((g.intersection(G)).area / g.area) >= thresh]
    elif shape == "square":
        geoms = [g for g in Geoms if ((g.intersection(G)).area / g.area) >= thresh]
    return geoms
# Reading geometric data
geo_filepath = "/data/geojson/pc_14.geojson"
GeoDF = geopandas.read_file(geo_filepath)
# Selecting random shapely-geometry
G = np.random.choice(GeoDF.geometry.values)
squares = split_polygon(G,shape='square',thresh=0.5,side_length=0.025)
rhombuses = split_polygon(G,shape='rhombus',thresh=0.5,side_length=0.025)
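Not part of the original answer, but a quick way to eyeball the result is to plot the generated squares over the outline of G (a sketch using the variables defined above):
import matplotlib.pyplot as plt
ax = geopandas.GeoSeries([G]).plot(facecolor='none', edgecolor='red', figsize=(6, 6))
geopandas.GeoSeries(squares).plot(ax=ax, facecolor='none', edgecolor='k')
plt.show()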
Previous Solution:
import numpy as np
import geopandas
from shapely.ops import split
from shapely.geometry import MultiPolygon, Polygon, Point, MultiPoint, LineString  # LineString is needed below


def get_rect_from_geom(G, r=2):
    """
    Get rectangles from a geometry.
    r = rounding factor.
    small r ==> more rounding off ==> more rectangles
    """
    coordinate_arrays = G.exterior.coords.xy
    coordinates = list(
        zip(
            [np.round(c, r) for c in coordinate_arrays[0]],
            [np.round(c, r) for c in coordinate_arrays[1]],
        )
    )
    Rectangles = []
    for c1 in coordinates:
        Coords1 = [a for a in coordinates if a != c1]
        for c2 in Coords1:
            Coords2 = [b for b in Coords1 if b != c2]
            x1, y1 = c1[0], c1[1]
            x2, y2 = c2[0], c2[1]
            K1 = [k for k in Coords2 if k == (x1, y2)]
            K2 = [k for k in Coords2 if k == (x2, y1)]
            if (len(K1) > 0) & (len(K2) > 0):
                rect = [list(c1), list(K1[0]), list(c2), list(K2[0])]
                Rectangles.append(rect)
    return Rectangles


def get_squares_from_rect(rect, side_length=0.0025):
    """
    Divide a rectangle into equal area squares
    side_length = required side of square
    """
    y_list = [r[1] for r in rect]
    x_list = [r[0] for r in rect]
    y1 = min(y_list)
    y2 = max(y_list)
    x1 = min(x_list)
    x2 = max(x_list)
    width = x2 - x1
    height = y2 - y1

    xcells, ycells = int(np.round(width / side_length)), int(
        np.round(height / side_length)
    )

    yindices = np.linspace(y1, y2, ycells + 1)
    xindices = np.linspace(x1, x2, xcells + 1)
    horizontal_splitters = [
        LineString([(x, yindices[0]), (x, yindices[-1])]) for x in xindices
    ]
    vertical_splitters = [
        LineString([(xindices[0], y), (xindices[-1], y)]) for y in yindices
    ]
    result = Polygon(rect)
    for splitter in vertical_splitters:
        result = MultiPolygon(split(result, splitter))
    for splitter in horizontal_splitters:
        result = MultiPolygon(split(result, splitter))
    square_polygons = list(result)

    return [np.stack(SQPOLY.exterior.coords.xy, axis=1) for SQPOLY in square_polygons]


def round_centroid(g, r=10):
    """
    Get Centroids.
    Round off centroid coordinates to `r` decimal points.
    """
    C = g.centroid.coords.xy
    return (np.round(C[0][0], r), np.round(C[1][0], r))


def subdivide_polygon(g, side_length=0.0025, r=10):
    """
    1. Create all possible rectangle coordinates from the geometry,
       its minimum rotated rectangle, and its convex hull.
    2. Divide all rectangles into smaller squares.
    small r ==> more rounding off ==> fewer overlapping squares (these are dropped as duplicates)
    large r ==> may lead to a few overlapping squares.
    """
    # Number of squares required.
    num_squares_reqd = g.area // (side_length ** 2)

    # Some of these combinations can be dropped to improve performance.
    Rectangles = []
    Rectangles.extend(get_rect_from_geom(g))
    Rectangles.extend(get_rect_from_geom(g.minimum_rotated_rectangle))
    Rectangles.extend(get_rect_from_geom(g.convex_hull))

    Squares = []
    for rect in Rectangles:  # iterate directly so the rounding parameter `r` is not shadowed
        Squares.extend(get_squares_from_rect(rect, side_length=side_length))

    SquarePolygons = [Polygon(square) for square in Squares]
    GDF = geopandas.GeoDataFrame(SquarePolygons).rename(columns={0: "geometry"})
    GDF.loc[:, "centroid"] = GDF.geometry.apply(round_centroid, r=r)
    GDF = GDF.drop_duplicates(subset=["centroid"])

    wgeoms = GDF[GDF.within(g)].geometry.values
    igeoms = GDF[GDF.intersects(g)].geometry.values

    w = abs(num_squares_reqd - len(wgeoms))
    i = abs(num_squares_reqd - len(igeoms))
    print(w, i)

    if w <= i:
        return wgeoms
    else:
        return igeoms


geoms = subdivide_polygon(g)  # g is a shapely Polygon
Stumbled across this after many searches.
Thanks @Aditya Chhabra for your submission, it works great, but get_squares_from_rect is very slow for small side lengths due to the iterative clips.
We can do this almost instantaneously if we combine all LineStrings into a single collection, then clip and polygonize in one step, which I found in this question.
Previously, side lengths of 0.0001 (EPSG:4326) took > 1 minute; now it takes practically no time.
from shapely.ops import unary_union, polygonize, linemerge
from shapely.geometry import LineString
import numpy as np


def get_squares_from_rect_faster(RectangularPolygon, side_length=0.0025):
    rect_coords = np.array(RectangularPolygon.boundary.coords.xy)
    y_list = rect_coords[1]
    x_list = rect_coords[0]
    y1 = min(y_list)
    y2 = max(y_list)
    x1 = min(x_list)
    x2 = max(x_list)
    width = x2 - x1
    height = y2 - y1

    xcells = int(np.round(width / side_length))
    ycells = int(np.round(height / side_length))

    yindices = np.linspace(y1, y2, ycells + 1)
    xindices = np.linspace(x1, x2, xcells + 1)
    horizontal_splitters = [
        LineString([(x, yindices[0]), (x, yindices[-1])]) for x in xindices
    ]
    vertical_splitters = [
        LineString([(xindices[0], y), (xindices[-1], y)]) for y in yindices
    ]
    lines = horizontal_splitters + vertical_splitters
    lines.append(RectangularPolygon.boundary)
    lines = unary_union(lines)
    lines = linemerge(lines)
    return list(polygonize(lines))
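Not in the original answer, but a small usage sketch for a sanity check (the rectangle coordinates are arbitrary):
from shapely.geometry import box

rect = box(0.0, 0.0, 0.01, 0.01)   # arbitrary 0.01 x 0.01 degree rectangle
squares = get_squares_from_rect_faster(rect, side_length=0.0025)
print(len(squares))                # a 4 x 4 grid, i.e. 16 squares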
