Sky localization maps#
from plot_config import setup_matplotlib_style
import matplotlib.pyplot as plt
# Configure matplotlib with high-quality settings
setup_matplotlib_style()
import numpy as np
import healpy as hp
from astropy.utils.data import get_pkg_data_filename
from astropy.io import fits
%matplotlib inline
Load a Flat HEALPix Sky Map#
Example: S251112cm
import gzip
import urllib.request
from io import BytesIO
import healpy as hp
import tempfile
import os
# Download and read directly from GraceDB without saving permanently
url = 'https://gracedb.ligo.org/api/superevents/S251112cm/files/bayestar.fits.gz'
print(f"Downloading from {url}...")
with urllib.request.urlopen(url) as response:
compressed_data = response.read()
print("Decompressing in memory...")
with gzip.open(BytesIO(compressed_data), 'rb') as f_in:
fits_data = f_in.read()
# Create temporary file for healpy to read
with tempfile.NamedTemporaryFile(suffix='.fits', delete=False) as tmp:
tmp.write(fits_data)
tmp_path = tmp.name
try:
# Use healpy to read the map properly (handles NESTED/RING ordering automatically)
print("Reading HEALPix map...")
    m, header = hp.read_map(tmp_path, h=True)
header = dict(header)
finally:
# Clean up temporary file
os.unlink(tmp_path)
print(f"✅ Map loaded successfully!")
print(f"Nside = {header['NSIDE']}, Ordering = {header['ORDERING']}")
print(f"Number of pixels = {len(m):,}")
Understanding HEALPix Map Orderings#
HEALPix maps can be stored in two different orderings (see the conversion sketch below):
RING ordering: pixels are arranged in horizontal rings of constant latitude, numbered from the north to the south pole. Best for spherical-harmonic analysis
NESTED ordering: pixels follow a hierarchical quad-tree structure, so nearby pixels have nearby indices. Best for computation, indexing, and hierarchical methods
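If you need to switch conventions, healpy can reorder a map in memory; a minimal sketch using the map m loaded above:
import healpy as hp
import numpy as np
# hp.read_map returns RING ordering by default; convert to NESTED and back
m_nest = hp.reorder(m, r2n=True)
m_ring = hp.reorder(m_nest, n2r=True)
assert np.allclose(m_ring, m)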
nside = hp.get_nside(m) # or header['NSIDE']
npix = hp.nside2npix(nside)
print(f"Nside: {nside}")
print(f"Total pixels on the sky: {npix:,}")
print(f"Pixel area: {hp.nside2pixarea(nside, degrees=True):.4f} deg²")
Visualize the Sky Map (Mollweide Projection)#
hp.mollview(m, unit="Probability density", coord='C', cmap='Blues')
hp.graticule()
Careful with the rotated axes! The RA = 0 point is at the center of the map#
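To put RA = 180° at the center instead (moving RA = 0 to the map edges), mollview accepts a rot argument; a minimal sketch:
# Rotate the projection so that (lon, lat) = (180°, 0°) sits at the center
hp.mollview(m, unit="Probability density", coord='C', cmap='Blues', rot=(180, 0))
hp.graticule()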
Find the Highest-Probability Pixel & Convert to Sky Coordinates#
ipix_max = np.argmax(m)
theta, phi = hp.pix2ang(nside, ipix_max)
ra_deg = np.degrees(phi)
dec_deg = 90 - np.degrees(theta)
print(f"Highest probability pixel index: {ipix_max}")
print(f"RA, Dec = {ra_deg:.4f}°, {dec_deg:.4f}°")
Compute Credible Levels (50%, 90%, etc.)#
# Sort pixels by probability density (descending)
sorted_prob = np.sort(m)[::-1]
# Cumulative probability
cumsum = np.cumsum(sorted_prob)
cumsum /= cumsum[-1] # normalize to 1
print(f"Number of pixels in 90% region: {np.count_nonzero(cumsum <= 0.90)}")
print(f"The area of the 90% credible region is {np.count_nonzero(cumsum <= 0.90) * hp.nside2pixarea(nside, degrees=True):.4f} deg²")
def get_credible_level(ra, dec, skymap_data, nside):
"""
Get the credible level for a given RA and Dec position.
Parameters:
ra: Right Ascension in degrees
dec: Declination in degrees
skymap_data: HEALPix probability map
nside: HEALPix nside parameter
Returns:
Credible level as a percentage
"""
# Convert RA, Dec to theta, phi
theta_query = np.radians(90 - dec)
phi_query = np.radians(ra)
# Get pixel index
ipix_query = hp.ang2pix(nside, theta_query, phi_query)
# Get probability at this pixel
prob_at_pixel = skymap_data[ipix_query]
# Calculate credible level
sorted_prob_desc = np.sort(skymap_data)[::-1]
cumsum_prob = np.cumsum(sorted_prob_desc)
cumsum_prob /= cumsum_prob[-1]
    # Assign the pixel the credible level implied by its probability density:
    # the cumulative probability of all pixels at least as dense
# Find where this probability falls in the sorted array
credible_level = cumsum_prob[sorted_prob_desc >= prob_at_pixel][-1] * 100
return credible_level, prob_at_pixel
# Example usage: query an arbitrary position
ra_test = 180
dec_test = 55
cl, prob = get_credible_level(ra_test, dec_test, m, nside)
print(f"RA={ra_test:.4f}°, Dec={dec_test:.4f}°")
print(f"Probability density: {prob:.6e}")
print(f"Credible level: {cl:.2f}%")
Downgrade to Lower Resolution#
# Configure matplotlib for transparent background
import matplotlib
matplotlib.rcParams['savefig.transparent'] = True
nside_low = 32
m_low = hp.ud_grade(m, nside_out=nside_low, power=-2)  # power=-2 preserves the map's sum (total probability)
hp.mollview(m_low, cmap='Blues')
hp.graticule()
Integrated probability inside a circle#
ra_test = 180
dec_test = 55
theta = np.radians(90 - dec_test)
phi = np.radians(ra_test)
radius = np.radians(5) # example radius of 5 degrees
xyz = hp.ang2vec(theta, phi)
ipix_disc = hp.query_disc(nside, xyz, radius)
print(f"{m[ipix_disc].sum()*100:.2f}%")
Integrated probability inside a polygon#
# Convert polygon vertices from RA, Dec to Cartesian coordinates
# Create a square polygon centered at ra_test, dec_test with side 10 degrees
half_side = 5 # half of 10 degrees
vertices_radec = [[ra_test - half_side, dec_test - half_side],
[ra_test + half_side, dec_test - half_side],
[ra_test + half_side, dec_test + half_side],
[ra_test - half_side, dec_test + half_side]]
xyz = np.array([hp.ang2vec(np.radians(90 - dec), np.radians(ra))
for ra, dec in vertices_radec])
ipix_poly = hp.query_polygon(nside, xyz)
print(f"{m[ipix_poly].sum()*100:.2f}%")
Multi-order resolution maps#
Each region of the sky is divided into sub-regions, and the process is repeated iteratively until the resolution matches the spatial scale of the map’s variations in that region of the sky: a coarser grid where the probability changes smoothly, a finer grid where more information is contained.
Each pixel therefore carries two pieces of information: the resolution level of its sub-grid (given by nside) and the index of the pixel inside that sub-grid. These are compressed into a single index
\( uniq = ipix + 4 * nside^2 \)
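As a quick consistency check of this relation, astropy_healpix can split a UNIQ index into (level, ipix) and back; a minimal sketch with a made-up pixel (ipix = 100 at nside = 8):
import astropy_healpix as ah
import numpy as np

uniq_example = 100 + 4 * 8**2      # hypothetical pixel, following the formula above
level, ipix = ah.uniq_to_level_ipix(uniq_example)
nside = ah.level_to_nside(level)
print(level, nside, ipix)          # level=3, nside=8, ipix=100
assert uniq_example == ipix + 4 * nside**2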
from astropy.table import QTable
from astropy import units as u
import astropy_healpix as ah
import numpy as np
skymap = QTable.read('bayestar.multiorder.fits,1')
Most Probable Sky Location#
i = np.argmax(skymap['PROBDENSITY'])
uniq = skymap[i]['UNIQ']
level, ipix = ah.uniq_to_level_ipix(uniq)
nside = ah.level_to_nside(level)
nside
uniq = skymap[0]['UNIQ']
level, ipix = ah.uniq_to_level_ipix(uniq)
nside = ah.level_to_nside(level)
nside
Comparing the two shows that the map stores higher resolution (larger nside) around the probability peak, where more information is concentrated, than elsewhere on the sky.
# Recompute (level, ipix) for the most probable pixel: they were overwritten just above
level, ipix = ah.uniq_to_level_ipix(skymap[np.argmax(skymap['PROBDENSITY'])]['UNIQ'])
ra, dec = ah.healpix_to_lonlat(ipix, ah.level_to_nside(level), order='nested')
ra.deg, dec.deg
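To see this pattern across the whole map, count how many pixels are stored at each refinement level; a short sketch:
levels = ah.uniq_to_level_ipix(skymap['UNIQ'])[0]
for lvl in np.unique(levels):
    print(f"level {lvl:2d} (nside {ah.level_to_nside(lvl):5d}): {np.sum(levels == lvl):7d} pixels")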
Probability Density at a Known Position#
ra = 180 * u.deg
dec = 55 * u.deg
max_level = 29
max_nside = ah.level_to_nside(max_level)
level, ipix = ah.uniq_to_level_ipix(skymap['UNIQ'])
index = ipix * (2**(max_level - level))**2
sorter = np.argsort(index)
match_ipix = ah.lonlat_to_healpix(ra, dec, max_nside, order='nested')
i = sorter[np.searchsorted(index, match_ipix, side='right', sorter=sorter) - 1]
print('This is the probability density per deg^2:', skymap[i]['PROBDENSITY'].to_value(u.deg**-2))
Credible level at a Known Position#
import ligo.skymap.io.fits
from ligo.skymap.postprocess.util import find_greedy_credible_levels
from astropy.coordinates import SkyCoord
skymap_arr, _ = ligo.skymap.io.fits.read_sky_map('bayestar.multiorder.fits,1', nest=True)
cls = 100 * find_greedy_credible_levels(skymap_arr)
# Example position to query (RA = 346.816°, Dec = -13.478°)
theta = 0.5 * np.pi - np.deg2rad(-13.4780549)
phi = np.deg2rad(346.81640625)
# read_sky_map rasterized the multi-order map to a single resolution,
# so we can query it with plain healpy (NESTED ordering)
ipix = hp.ang2pix(hp.get_nside(skymap_arr), theta, phi, nest=True)
print(f"Credible level : {cls[ipix]:.2f}%")
Is there an alternative way to do that just using the MOC format?
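One possible approach, sketched below under the assumption that the columns behave as described above: rank the multi-order pixels by probability density, accumulate their total probabilities (density times pixel area), and read off the cumulative value at the pixel containing the target position, found with the same index/sorter trick as before. This is an illustration, not an official ligo.skymap API.
level, ipix = ah.uniq_to_level_ipix(skymap['UNIQ'])
pix_area = ah.nside_to_pixel_area(ah.level_to_nside(level))
prob = (skymap['PROBDENSITY'] * pix_area).to_value(u.dimensionless_unscaled)

order = np.argsort(-skymap['PROBDENSITY'])   # densest pixels first (greedy ranking)
cum = np.cumsum(prob[order])

# Locate the multi-order pixel containing the target position
max_level = 29
index = ipix * (2**(max_level - level))**2
sorter = np.argsort(index)
match_ipix = ah.lonlat_to_healpix(346.81640625 * u.deg, -13.4780549 * u.deg,
                                  ah.level_to_nside(max_level), order='nested')
i = sorter[np.searchsorted(index, match_ipix, side='right', sorter=sorter) - 1]

rank = np.empty_like(order)
rank[order] = np.arange(len(order))          # rank of each pixel in the greedy ordering
print(f"Credible level (MOC): {100 * cum[rank[i]]:.2f}%")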
90% region#
# Compute the 90% credible region area from cls
a_90 = cls[cls <= 90]
n_pixels_90 = len(a_90)
area_90 = n_pixels_90 * hp.nside2pixarea(hp.get_nside(skymap_arr), degrees=True)
print(f"Number of pixels in 90% credible region: {n_pixels_90:,}")
print(f"Area of 90% credible region: {area_90:.2f} deg²")
Notice that in the multi-order format each pixel has a different area, so the map reports probability per unit area (PROBDENSITY) rather than probability per pixel.
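Accordingly, here is a sketch of the 90% area computed natively on the multi-order table, reusing prob, order, cum, and pix_area from the sketch above: sum the (variable) areas of the densest pixels until 90% of the probability is enclosed.
in_90 = order[cum <= 0.9]
print(f"Number of multi-order pixels in the 90% region: {len(in_90):,}")
print(f"Area of 90% credible region (MOC): {pix_area[in_90].sum().to_value(u.deg**2):.2f} deg²")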
Probability inside a circle#
ra_test = 180
dec_test = 55
theta = np.radians(90 - dec_test)
phi = np.radians(ra_test)
radius = np.radians(5) # example radius of 5 degrees
xyz = hp.ang2vec(theta, phi)
ipix_disc = hp.query_disc(hp.get_nside(skymap_arr), xyz, radius)
print(f"{m[ipix_disc].sum()*100:.2f}%")
Let’s plot the skymap with credible contours#
import ligo.skymap.io.fits
import ligo.skymap.postprocess.util
import numpy as np
import matplotlib.pyplot as pp
from matplotlib.colors import LogNorm
from matplotlib import cm
import ligo.skymap.plot
skymap, _ = ligo.skymap.io.fits.read_sky_map('bayestar.multiorder.fits,1', nest=True)  # nest=True returns the pixels in NESTED ordering, for both flat and multi-order inputs
fig = pp.figure(dpi=300)
fig.patch.set_visible(False)
ax = pp.axes(projection='astro degrees mollweide')
ax.set_facecolor('none')
ax.grid()
cls = 100 * ligo.skymap.postprocess.util.find_greedy_credible_levels(skymap)
skymap[cls > 90] = np.nan
skymap[skymap == 0] = np.nan
ax.imshow_hpx((skymap, 'ICRS'), nested=True, cmap=cm.Oranges)
ax.contour_hpx((cls, 'ICRS'), nested=True, colors='black', levels=(50, 90), zorder=1, linestyles=['dashed', 'solid'], linewidths=0.5)
pp.show()
Zoom#
# Use the peak location from earlier
fig = plt.figure()
fig.patch.set_visible(False)
ax = plt.axes(projection='astro zoom',
center=f'{ra_deg:.4f}d {dec_deg:.4f}d', radius='30 deg')
ax.set_facecolor('none')
ax.grid()
ax.imshow_hpx((skymap, 'ICRS'), nested=True, cmap='Blues')
plt.title(f'Zoomed view around peak (RA={ra_deg:.2f}°, Dec={dec_deg:.2f}°)')
plt.show()
Going the distance#
Once we fix a position in the sky, the conditional posterior on distance is well approximated by a Gaussian times a uniform-in-volume prior (the prior contributes the \(D_L^2\) factor):
\(p(D_L \mid \text{RA, Dec}) = \text{DISTNORM(RA, Dec)} \times \dfrac{1}{\sqrt{2\pi}\,\text{DISTSIGMA(RA, Dec)}} \exp\Big[-\dfrac{(D_L - \text{DISTMU(RA, Dec)})^2}{2\, \text{DISTSIGMA(RA, Dec)}^2}\Big]\times D_L^2\)
# Posterior p(D_L | RA, Dec) using BAYESTAR multi-order distance parameters
from scipy.stats import norm
import matplotlib
import matplotlib.pyplot as plt
def distance_posterior(ra_deg, dec_deg, n=2000):
# Load multi-order table without clobbering earlier 'skymap' array
mo_table = QTable.read('bayestar.multiorder.fits,1')
# Map (ra, dec) to the appropriate multi-order pixel
level, ipix = ah.uniq_to_level_ipix(mo_table['UNIQ'])
max_level = 29
max_nside = ah.level_to_nside(max_level)
index = ipix * (2**(max_level - level))**2
sorter = np.argsort(index)
ra_q = ra_deg if hasattr(ra_deg, 'unit') else ra_deg * u.deg
dec_q = dec_deg if hasattr(dec_deg, 'unit') else dec_deg * u.deg
match_ipix = ah.lonlat_to_healpix(ra_q, dec_q, max_nside, order='nested')
    i = sorter[np.searchsorted(index, match_ipix, side='right', sorter=sorter) - 1]
mu = mo_table[i]['DISTMU'] # [Mpc]
sigma = mo_table[i]['DISTSIGMA'] # [Mpc]
    distnorm = mo_table[i]['DISTNORM']   # [1/Mpc^2]
    # Grid in distance: truncate at 0 and extend to ~5 sigma
    dmin = np.maximum(mu - 5 * sigma, 0 * u.Mpc)
    dmax = mu + 5 * sigma
    D = np.linspace(dmin, dmax, n)
# Conditional posterior p(D | Ω) ∝ DISTNORM * exp(-(D - mu)^2/(2σ^2)) * D^2
p = D**2 * distnorm * norm(mu, sigma).pdf(D)
# # Normalize numerically (should already be ~1)
# Z = np.trapz(p.value, D.to_value(u.Mpc))
# p /= Z
    return D, p, mu, sigma, i
ra_query, dec_query = 180 * u.deg, 75 * u.deg
D, pD, mu, sigma, idx = distance_posterior(ra_query, dec_query)
print(f"Using RA={ra_query.to_value(u.deg):.3f} deg, Dec={dec_query.to_value(u.deg):.3f} deg")
print(f"mu={mu.to_value(u.Mpc):.2f}, sigma={sigma.to_value(u.Mpc):.2f}")
# print(f"∫ p(D|Ω) dD ≈ {np.trapz(pD.value, D.to_value(u.Mpc)):.4f}")
plt.figure()
plt.plot(D, pD)
plt.xlabel('D_L [Mpc]')
plt.ylabel('p(D_L | RA, Dec) [1/Mpc]')
plt.title('Distance posterior at given sky position')
plt.grid(True, alpha=0.3)
plt.show()
Distance posterior marginalized over the sky#
# Configure matplotlib for transparent background
import matplotlib
matplotlib.rcParams['savefig.transparent'] = True
matplotlib.rcParams['figure.facecolor'] = 'none'
matplotlib.rcParams['axes.facecolor'] = 'none'
dmin = 0
dmax = 300
D = np.linspace(dmin, dmax, 100)
mo_table = QTable.read('bayestar.multiorder.fits,1')
level, ipix = ah.uniq_to_level_ipix(mo_table['UNIQ'])
mu = mo_table['DISTMU'].to_value(u.Mpc)               # [Mpc]
sigma = mo_table['DISTSIGMA'].to_value(u.Mpc)         # [Mpc]
distnorm = mo_table['DISTNORM'].to_value(1/u.Mpc**2)  # [1/Mpc^2]
pixel_area_sr = ah.nside_to_pixel_area(ah.level_to_nside(level)).to_value(u.sr)  # [sr]
# Probability per pixel = probability density [1/sr] x pixel area [sr]
prob_pixel = mo_table['PROBDENSITY'].to_value(1/u.sr) * pixel_area_sr
# Marginalize over the sky: sum each pixel's conditional distance posterior, weighted by its probability
DL_marginal = np.array([np.sum(prob_pixel * dd**2 * distnorm * norm(mu, sigma).pdf(dd)) for dd in D])
# Normalize the posterior
DL_marginal /= np.trapz(DL_marginal, D)
# Compute cumulative distribution
cumulative = np.cumsum(DL_marginal)
cumulative /= cumulative[-1]
# Find median (50th percentile)
median_idx = np.searchsorted(cumulative, 0.5)
median_distance = D[median_idx]
# Find 68% credible interval (16th and 84th percentiles)
lower_idx = np.searchsorted(cumulative, 0.16)
upper_idx = np.searchsorted(cumulative, 0.84)
lower_distance = D[lower_idx]
upper_distance = D[upper_idx]
# Compute sigma (half of the 68% credible interval width)
sigma_distance = (upper_distance - lower_distance) / 2
print(f"Median distance: {median_distance:.2f} Mpc")
print(f"68% credible interval: [{lower_distance:.2f}, {upper_distance:.2f}] Mpc")
print(f"Sigma: {sigma_distance:.2f} Mpc")
plt.figure()
plt.plot(D, DL_marginal, label='Marginalized distance posterior')
plt.axvline(median_distance, color='red', linestyle='--', label=f'Median: {median_distance:.2f} Mpc')
plt.axvline(lower_distance, color='orange', linestyle=':', label=f'68% CI: [{lower_distance:.2f}, {upper_distance:.2f}] Mpc')
plt.axvline(upper_distance, color='orange', linestyle=':')
plt.fill_between(D, 0, DL_marginal, where=(D >= lower_distance) & (D <= upper_distance),
alpha=0.3, color='orange', label=f'68% region (σ={sigma_distance:.2f} Mpc)')
plt.xlabel('D_L [Mpc]')
plt.ylabel('p(D_L) [1/Mpc]')
plt.title('Distance posterior marginalized over the sky')
plt.legend()
plt.grid(True, alpha=0.3)
plt.show()
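As a sanity check, the FITS header of BAYESTAR maps stores the sky-marginalized distance moments directly (the same DISTMEAN and DISTSTD keywords used in the Kafka example below), so we can compare them with the values computed above:
print(f"Header: DISTMEAN = {mo_table.meta['DISTMEAN']:.2f} Mpc, DISTSTD = {mo_table.meta['DISTSTD']:.2f} Mpc")
print(f"Computed: median = {median_distance:.2f} Mpc, sigma = {sigma_distance:.2f} Mpc")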
Probability density per unit volume#
mo_table = QTable.read('bayestar.multiorder.fits,1')
# Map (ra, dec) to the appropriate multi-order pixel
level, ipix = ah.uniq_to_level_ipix(mo_table['UNIQ'])
max_level = 29
max_nside = ah.level_to_nside(max_level)
index = ipix * (2**(max_level - level))**2
sorter = np.argsort(index)
ra_q = 180 * u.deg
dec_q = 55 * u.deg
d_test = 100 * u.Mpc
match_ipix = ah.lonlat_to_healpix(ra_q, dec_q, max_nside, order='nested')
i = sorter[np.searchsorted(index, match_ipix, side='right', sorter=sorter) - 1]
mu = mo_table['DISTMU'][i].to_value(u.Mpc)               # [Mpc]
sigma = mo_table['DISTSIGMA'][i].to_value(u.Mpc)         # [Mpc]
distnorm = mo_table['DISTNORM'][i].to_value(1/u.Mpc**2)  # [1/Mpc^2]
prob_density = mo_table['PROBDENSITY'][i].to_value(1/u.sr)  # [1/sr]
# dP/dV = PROBDENSITY x DISTNORM x N(d; DISTMU, DISTSIGMA): the D² of the volume
# element cancels the D² of the conditional distance posterior
dv = prob_density * distnorm * norm(mu, sigma).pdf(d_test.to_value(u.Mpc))  # [1/Mpc^3]
dv
What am I doing if, instead of selecting pixel [i], I use the full columns? I am evaluating the volume probability density at d_test for every pixel of the map at once:
mu = mo_table['DISTMU'].to_value(u.Mpc)               # [Mpc]
sigma = mo_table['DISTSIGMA'].to_value(u.Mpc)         # [Mpc]
distnorm = mo_table['DISTNORM'].to_value(1/u.Mpc**2)  # [1/Mpc^2]
prob_density = mo_table['PROBDENSITY'].to_value(1/u.sr)  # [1/sr]
dv = prob_density * distnorm * norm(mu, sigma).pdf(d_test.to_value(u.Mpc))  # [1/Mpc^3], one value per pixel
dv
Optimal Sphere Tessellation with Circles#
A method to achieve uniform coverage of a sphere using circles of fixed radius. We’ll explore:
Fibonacci Lattice: Generate uniformly distributed candidate positions
Greedy Selection: Iteratively select circles that maximize uncovered probability
No Double-Counting: Track covered pixels to compute only new probability added by each circle
The goal is to cover a target probability (e.g., 90%) with the minimum number of circles, approximating optimal hexagonal packing on a sphere.
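The center spacing used below for hexagonal-like packing on the sphere is \( d = 2\arcsin\big(\frac{\sqrt{3}}{2}\sin r\big) \), which reduces to the planar \( d \approx \sqrt{3}\,r \) for small radii; a quick numeric check:
import numpy as np

for r_deg in (1, 5, 10, 30):
    r = np.deg2rad(r_deg)
    d = 2 * np.arcsin(np.sqrt(3) / 2 * np.sin(r))
    print(f"r = {r_deg:>2}°: spacing d = {np.rad2deg(d):6.3f}°, d/r = {d / r:.3f} (planar limit √3 ≈ {np.sqrt(3):.3f})")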
Plotting function for spherical caps#
from astropy.coordinates import SkyCoord

def plot_spherical_cap(ax, center_lon_deg, center_lat_deg, radius_deg,
npoints=720, fill=True, facecolor='none', edgecolor='red',
alpha=0.3, linewidth=0.8, zorder=5):
"""
Plot a spherical cap (sphere ∩ cone) on an axes that accepts transform=ax.get_transform('icrs').
center_lon_deg, center_lat_deg: degrees (RA/longitude, Dec/latitude in ICRS)
radius_deg: angular radius of the cap in degrees
npoints: number of points to sample boundary (higher -> smoother)
Handles circles that cross the RA=0/360 boundary by splitting them into multiple segments.
"""
# center as SkyCoord and radial positions around it
center = SkyCoord(center_lon_deg * u.deg, center_lat_deg * u.deg, frame='icrs')
pos_angles = np.linspace(0, 360, npoints) * u.deg
boundary = center.directional_offset_by(pos_angles, radius_deg * u.deg)
# Get RA in [0, 360) range
ra = boundary.ra.deg
dec = boundary.dec.deg
# Detect large jumps in RA (>180 deg) indicating boundary crossing
ra_diff = np.diff(ra)
jumps = np.abs(ra_diff) > 180
split_idx = np.where(jumps)[0]
# Split into continuous segments
segments = []
start = 0
for idx in split_idx:
segments.append((ra[start:idx + 1], dec[start:idx + 1]))
start = idx + 1
segments.append((ra[start:], dec[start:]))
# Plot each continuous segment
transform = ax.get_transform('icrs')
for seg_ra, seg_dec in segments:
if len(seg_ra) < 2:
continue
if fill and facecolor != 'none':
ax.fill(seg_ra, seg_dec, transform=transform,
facecolor=facecolor, edgecolor='none', alpha=alpha, zorder=zorder)
# Always draw edge so the cap boundary is visible
ax.plot(seg_ra, seg_dec, transform=transform, color=edgecolor,
linewidth=linewidth, alpha=alpha, zorder=zorder + 1)
Fibonacci Lattice Algorithm#
The Fibonacci lattice provides a highly uniform distribution of points on a sphere, generally more uniform than HEALPix pixel centers for circle packing. It uses the golden ratio to create a spiral pattern with nearly equal-area spacing.
def fibonacci_sphere(n_samples):
"""
Generate uniformly distributed points on a sphere using Fibonacci lattice.
    This provides highly uniform coverage, typically better than HEALPix pixel centers for circle packing.
Based on: "How to generate equidistributed points on the surface of a sphere"
by Àlex Comas (2015) and Vogel's method.
Parameters:
-----------
n_samples : int
Number of points to generate
Returns:
--------
ra, dec : arrays
Right ascension and declination in degrees
"""
golden_ratio = (1 + np.sqrt(5)) / 2
# Generate indices
i = np.arange(n_samples)
# Latitude: uniform distribution in cos(theta) ensures equal-area spacing
theta = np.arccos(1 - 2 * (i + 0.5) / n_samples)
# Longitude: golden angle spiral
phi = 2 * np.pi * i / golden_ratio
# Convert to RA, Dec
dec = 90 - np.rad2deg(theta)
ra = np.rad2deg(phi) % 360
return ra, dec
def optimize_circle_coverage(r_circle_deg, target_coverage=0.9):
"""
Find optimal number of circles to cover target probability on skymap.
Uses Fibonacci lattice for uniform initial distribution + greedy selection.
    Uses numpy boolean masks for the pixel bookkeeping (one Python loop over candidates remains).
    Key features:
    - Hexagonal-like packing: candidate centers spaced ~sqrt(3)*r apart (spherical spacing formula below)
    - No double-counting: tracks covered pixels to compute only the new probability added
    - Fast in practice: the per-circle work is vectorized numpy masking, not per-pixel loops
Parameters:
-----------
r_circle_deg : float
Circle radius in degrees
target_coverage : float
Target probability coverage (e.g., 0.9 for 90%)
Returns:
--------
selected_circles : list of tuples
(ra, dec, probability_added) for each selected circle
"""
r_rad = np.deg2rad(r_circle_deg)
# Estimate sphere area that can be covered by one circle (spherical cap area)
cap_area = 2 * np.pi * (1 - np.cos(r_rad)) # steradians
sphere_area = 4 * np.pi # steradians
# For hexagonal packing on a SPHERE, accounting for curvature:
# Optimal spacing for hexagonal close packing of spherical caps:
# d = 2 * arcsin(sqrt(3)/2 * sin(r))
# For small angles: d ≈ sqrt(3) * r
target_spacing_rad = 2 * np.arcsin(np.sqrt(3)/2 * np.sin(r_rad))
# Each circle "owns" area in hexagonal tessellation
# Voronoi cell area ≈ 2√3 r² for planar, adjusted for sphere
packing_efficiency = 0.907 # hexagonal close packing efficiency
n_circles_theoretical = int(np.ceil(sphere_area / (cap_area * packing_efficiency)))
# Estimate number of Fibonacci points for target spacing
points_per_steradian = 1 / (target_spacing_rad ** 2)
n_candidates = int(sphere_area * points_per_steradian * 1.5) # 50% extra for greedy selection
print(f"Circle radius: {r_circle_deg}°")
print(f"Spherical cap area: {np.rad2deg(np.rad2deg(cap_area)):.2f} deg²")
print(f"Hexagonal packing spacing (spherical): {np.rad2deg(target_spacing_rad):.3f}°")
print(f" (≈ {np.rad2deg(target_spacing_rad)/r_circle_deg:.3f} × r, vs √3={np.sqrt(3):.3f} in planar case)")
print(f"Theoretical circles for full coverage: {n_circles_theoretical}")
print(f"Generating {n_candidates} candidate positions using Fibonacci lattice...")
# Generate candidate centers using Fibonacci lattice
ra_centers, dec_centers = fibonacci_sphere(n_candidates)
# Prepare circle data with probability information
nside = hp.npix2nside(len(skymap))
circle_data = []
print("Computing probability in each circle...")
for ra_c, dec_c in zip(ra_centers, dec_centers):
theta_c = np.radians(90 - dec_c)
phi_c = np.radians(ra_c)
xyz_c = hp.ang2vec(theta_c, phi_c)
# Get pixels within the circle (as numpy array for faster operations)
ipix_disc = np.array(hp.query_disc(nside, xyz_c, r_rad, nest=True), dtype=np.int32)
# Total probability in this circle
total_prob = np.nansum(skymap[ipix_disc])
circle_data.append((ra_c, dec_c, ipix_disc, total_prob))
# ============ OPTIMIZED GREEDY SELECTION (vectorized, not loops) ============
print(f"Running greedy selection for {target_coverage:.0%} coverage...")
print("(Each circle only counts probability from pixels not yet covered)\n")
selected_circles = []
covered_pixels = np.zeros(len(skymap), dtype=bool) # Boolean mask instead of set
cumulative_prob = 0.0
iteration = 0
max_iterations = len(circle_data) # Safety limit
while cumulative_prob < target_coverage and circle_data and iteration < max_iterations:
iteration += 1
        # Scan the remaining candidate circles, masking out already-covered pixels
        # so each circle is scored only on the new probability it would add
best_idx = -1
best_new_prob = 0.0
best_uncovered_mask = None
for idx, (ra_c, dec_c, ipix_disc, _) in enumerate(circle_data):
# Fast numpy masking: find pixels in circle that aren't covered
uncovered_mask = ~covered_pixels[ipix_disc]
# Sum only uncovered probabilities (no double-counting)
new_prob = np.nansum(skymap[ipix_disc[uncovered_mask]])
if new_prob > best_new_prob:
best_new_prob = new_prob
best_idx = idx
best_uncovered_mask = uncovered_mask
# If no circle adds new probability, stop
if best_idx == -1 or best_new_prob == 0:
break
# Add best circle to selection
ra_c, dec_c, ipix_disc, _ = circle_data[best_idx]
selected_circles.append((ra_c, dec_c, best_new_prob))
# Mark newly covered pixels (fast boolean indexing)
covered_pixels[ipix_disc[best_uncovered_mask]] = True
cumulative_prob += best_new_prob
# Remove selected circle from candidates
circle_data.pop(best_idx)
# Progress update every 10 circles
if len(selected_circles) % 10 == 0:
avg_new_prob = cumulative_prob / len(selected_circles)
print(f" Iteration {iteration}: {len(selected_circles)} circles, "
f"coverage: {cumulative_prob:.2%}, avg new prob/circle: {avg_new_prob:.3%}")
print(f"\n✓ Selected {len(selected_circles)} circles covering {cumulative_prob:.2%}")
if len(selected_circles) > 0:
print(f" Average new probability per circle: {cumulative_prob/len(selected_circles):.3%}")
print(f" Total unique pixels covered: {np.sum(covered_pixels):,} (no double-counting)")
return selected_circles
# Test with different circle sizes
r_circle = 10 # degrees
selected_circles = optimize_circle_coverage(r_circle, target_coverage=0.9)
# Visualize the selected circles on the skymap
fig = plt.figure(figsize=(14, 8), dpi=300)
fig.patch.set_visible(False)
ax = plt.axes(projection='astro degrees mollweide')
ax.set_facecolor('none')
ax.grid()
# Plot skymap
ax.imshow_hpx((skymap, 'ICRS'), nested=True, cmap='Blues', alpha=0.6)
ax.contour_hpx((cls, 'ICRS'), nested=True, colors='black',
levels=(50, 90), zorder=1, linestyles=['dashed', 'solid'],
linewidths=0.7)
# Plot selected circles as spherical caps
from astropy.coordinates import SkyCoord
print(f"Plotting {len(selected_circles)} optimized circles...")
for ra_c, dec_c, prob in selected_circles:
plot_spherical_cap(ax, center_lon_deg=ra_c, center_lat_deg=dec_c,
radius_deg=r_circle,
npoints=360, fill=False, facecolor='none',
edgecolor='red', alpha=0.35, linewidth=0.6)
plt.title(f'Optimal Fibonacci Lattice Tessellation\n{len(selected_circles)} circles (r={r_circle}°) covering 90% probability',
fontsize=12, fontweight='bold')
plt.tight_layout()
plt.show()
Cumulative Coverage Analysis#
Show how probability accumulates as we add circles, ranked by their contribution:
# Configure matplotlib for transparent background
import matplotlib
matplotlib.rcParams['savefig.transparent'] = True
matplotlib.rcParams['figure.facecolor'] = 'none'
matplotlib.rcParams['axes.facecolor'] = 'none'
# Plot cumulative probability coverage as circles are added
if len(selected_circles) > 0:
# Extract probabilities added by each circle (already sorted by greedy algorithm)
individual_probs = np.array([prob for _, _, prob in selected_circles])
# Cumulative sum
cumulative_coverage = np.cumsum(individual_probs)
# Circle indices (1-indexed for display)
circle_numbers = np.arange(1, len(selected_circles) + 1)
# Create figure with two subplots
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5))
# Plot 1: Cumulative coverage
ax1.plot(circle_numbers, cumulative_coverage * 100, 'o-',
linewidth=2, markersize=4, color='steelblue', label='Cumulative coverage')
ax1.axhline(90, color='red', linestyle='--', linewidth=1.5,
label='90% target', alpha=0.7)
ax1.fill_between(circle_numbers, 0, cumulative_coverage * 100,
alpha=0.2, color='steelblue')
ax1.set_xlabel('Number of Circles', fontsize=11)
ax1.set_ylabel('Cumulative Probability Coverage (%)', fontsize=11)
ax1.set_title('Greedy Circle Selection: Cumulative Coverage',
fontsize=12, fontweight='bold')
ax1.legend(loc='lower right')
ax1.grid(True, alpha=0.3)
ax1.set_xlim(0, len(selected_circles))
ax1.set_ylim(0, 100)
# Add annotation for 90% point
idx_90 = np.searchsorted(cumulative_coverage, 0.9)
if idx_90 < len(selected_circles):
ax1.plot(idx_90 + 1, cumulative_coverage[idx_90] * 100, 'ro',
markersize=10, zorder=5)
ax1.annotate(f'{idx_90 + 1} circles\nfor 90%',
xy=(idx_90 + 1, cumulative_coverage[idx_90] * 100),
xytext=(idx_90 + 1 + len(selected_circles)*0.15, 75),
fontsize=10, color='red',
arrowprops=dict(arrowstyle='->', color='red', lw=1.5),
bbox=dict(boxstyle='round,pad=0.5', facecolor='white',
edgecolor='red', alpha=0.8))
# Plot 2: Individual contribution per circle (descending order)
ax2.bar(circle_numbers, individual_probs * 100,
color='coral', alpha=0.7, edgecolor='black', linewidth=0.5)
ax2.set_xlabel('Circle Rank (by contribution)', fontsize=11)
ax2.set_ylabel('Probability Added (%)', fontsize=11)
ax2.set_title('Individual Circle Contribution (Descending Order)',
fontsize=12, fontweight='bold')
ax2.grid(True, alpha=0.3, axis='y')
# Add mean line
mean_contrib = np.mean(individual_probs) * 100
ax2.axhline(mean_contrib, color='blue', linestyle='--', linewidth=1.5,
label=f'Mean: {mean_contrib:.2f}%', alpha=0.7)
ax2.legend()
plt.tight_layout()
plt.show()
# Print statistics
print(f"\n=== Coverage Statistics ===")
print(f"Total circles selected: {len(selected_circles)}")
print(f"Final coverage: {cumulative_coverage[-1]:.2%}")
print(f"\nCircle contributions:")
print(f" Top circle: {individual_probs[0]:.2%}")
print(f" 10th circle: {individual_probs[9]:.2%}" if len(individual_probs) > 9 else "")
print(f" Mean: {mean_contrib/100:.2%}")
print(f" Last circle: {individual_probs[-1]:.2%}")
# Efficiency metrics
if len(selected_circles) >= 10:
efficiency_first_10 = np.sum(individual_probs[:10])
efficiency_overall = cumulative_coverage[-1] / len(selected_circles)
print(f"\nEfficiency:")
print(f" First 10 circles: {efficiency_first_10:.2%} of total")
print(f" Average per circle: {efficiency_overall:.3%}")
Receive GW skymaps through Kafka notices#
This method allows you to receive the map immediately, without the need for an additional query to GraceDB. Extremely performant!
# Parse a notice record received from the Kafka stream (see the consumer sketch below)
from base64 import b64decode
from io import BytesIO
import json
import numpy as np
from astropy.table import Table
import astropy_healpix as ah
def parse_notice(record):
skymap_str = record.get('event', {}).pop('skymap')
if skymap_str:
# Decode, parse skymap, and print most probable sky location
skymap_bytes = b64decode(skymap_str)
skymap = Table.read(BytesIO(skymap_bytes))
level, ipix = ah.uniq_to_level_ipix(
skymap[np.argmax(skymap['PROBDENSITY'])]['UNIQ']
)
ra, dec = ah.healpix_to_lonlat(ipix, ah.level_to_nside(level),
order='nested')
print(f'Most probable sky location (RA, Dec) = ({ra.deg}, {dec.deg})')
# Print some information from FITS header
print(f'Distance = {skymap.meta["DISTMEAN"]} +/- {skymap.meta["DISTSTD"]}')
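A minimal sketch of how parse_notice could be hooked up to the live stream, assuming the gcn-kafka client package and placeholder credentials (register at https://gcn.nasa.gov to obtain real ones):
import json
from gcn_kafka import Consumer

# Placeholder credentials: fill in your own
consumer = Consumer(client_id='fill-me-in', client_secret='fill-me-in')
consumer.subscribe(['igwn.gwalert'])

while True:
    for message in consumer.consume(timeout=1):
        record = json.loads(message.value())
        parse_notice(record)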
Some weirdly shaped localization maps#
import ligo.skymap.io.fits
import ligo.skymap.postprocess.util
import numpy as np
import matplotlib.pyplot as pp
from matplotlib.colors import LogNorm
from matplotlib import cm
import ligo.skymap.plot
skymap, _ = ligo.skymap.io.fits.read_sky_map('771151674_0_n_PROBMAP.fits', nest=True)  # nest=True returns the pixels in NESTED ordering, for both flat and multi-order inputs
fig = pp.figure(dpi=300)
fig.patch.set_visible(False)
ax = pp.axes(projection='astro degrees mollweide')
ax.set_facecolor('none')
ax.grid()
cls = 100 * ligo.skymap.postprocess.util.find_greedy_credible_levels(skymap)
vmax = np.percentile(skymap[~np.isnan(skymap)], 99.0)
vmin = np.percentile(skymap[~np.isnan(skymap)], 10.0)
vmin = max(vmax/1e3, vmin)
skymap[cls > 90] = np.nan
skymap[skymap == 0] = np.nan
ax.imshow_hpx((skymap, 'ICRS'), nested=True, cmap=cm.Oranges, norm=LogNorm(vmin=vmin, vmax=vmax), zorder=0)
ax.contour_hpx((cls, 'ICRS'), nested=True, colors='black', levels=(50, 90), zorder=1, linestyles=['dashed', 'solid'], linewidths=0.5)
pp.show()
skymap, _ = ligo.skymap.io.fits.read_sky_map('771151674_0_n_PROBMAP.fits', nest=True)  # re-read: the previous cell replaced part of the map with NaNs
ax = pp.axes(projection='astro degrees zoom',
             center='170d -55d', radius='10 deg')
cls = 100 * ligo.skymap.postprocess.util.find_greedy_credible_levels(skymap)
vmax = np.percentile(skymap[~np.isnan(skymap)], 99.0)
vmin = np.percentile(skymap[~np.isnan(skymap)], 10.0)
vmin = max(vmax/1e3, vmin)
skymap[cls > 90] = np.nan
skymap[skymap == 0] = np.nan
ax.imshow_hpx((skymap, 'ICRS'), nested=True, cmap=cm.Oranges, norm=LogNorm(vmin=vmin, vmax=vmax), zorder=0)
ax.contour_hpx((cls, 'ICRS'), nested=True, colors='black', levels=(50, 90), zorder=1, linestyles=['dashed', 'solid'])
Let’s also take a look at another weird map: http://www.ioffe.ru/LEA/GRBs/GRB251105_T03511/IPN/
# Exercise: plot the 90% credible region and find the RA and Dec of the peak-probability location