Plot cloud field properties derived from ASTER images on February 5
Posted on Do 06 August 2020 in cloud field properties • by Theresa Mieslinger
In [6]:
%pylab inline
%matplotlib inline
import warnings
warnings.simplefilter("ignore")
import matplotlib.pyplot as plt
import numpy as np
import xarray as xr
from scipy.optimize import curve_fit
import typhon as ty
from typhon.cloudmask import aster
from utils import *
In [7]:
# Apply the typhon style sheet so all figures share a consistent appearance.
plt.style.use(ty.plots.styles.get('typhon'))
List of ASTER images on Feb 5
In [3]:
# ASTER scene acquisition times (HHMMSS UTC) on 2020-02-05, chronological.
_times0205 = [
    "142515", "142524", "142533", "142542", "142551", "142600",
    "142608", "142617", "142626", "142635", "142644", "142653",
    "142702", "142710", "142719", "142728", "142737",
]
# Processed-granule filenames, one per scene.
files0205 = [f"TERRA_ASTER_processed_20200205_{t}.nc" for t in _times0205]
Read in cloud properties from Feb 5¶
In [4]:
# Collect the pre-computed per-scene cloud statistics from NetCDF files.
cloudsizes = []
cloudtopheights = []
cloudfraction = []
cloudclustering = []
# The enumerate index in the original loop was unused; iterate directly.
for file in files0205:
    ds = xr.open_dataset(f"cloudstatistics_{file}")
    cloudsizes.append(ds.cloudsizes)
    cloudtopheights.append(ds.cloudtopheight)
    cloudfraction.append(ds.cloudfraction)
    cloudclustering.append(ds.Iorg)

# One entry per ASTER scene (17 scenes on Feb 5).
np.shape(cloudsizes)
Out[4]:
In [91]:
%%time
# flatten list of cloudsizes and cloud top heights
sizes = np.array([item for sublist in cloudsizes for item in sublist])
print("cloudfraction: ", np.nanmin(sizes), np.nanmax(sizes), sizes.size)
heights = np.array([item for sublist in cloudtopheights for item in sublist]) / 1000 # km
print("cloud top height: ", np.nanmin(heights), np.nanmax(heights), heights.size)
cloud size distribution¶
binning¶
In [30]:
# 0.033 km is the smallest possible equivalent diameter for a cloud of
# 4 pixels at 15 m x 15 m each; bin logarithmically up to 20 km.
d_min_km = 0.033
d_max_km = 20
bins = np.linspace(np.log10(d_min_km), np.log10(d_max_km), 30)
# Bin midpoints in log10 space, used as the x-coordinate of each bin.
binmids = (bins[1:] + bins[:-1]) / 2
frequency¶
In [31]:
# Raw counts per log10(size) bin. The `normed` keyword was deprecated and
# later removed from np.histogram (NumPy 1.24); plain counts are the default,
# so no flag is needed.
n = np.histogram(np.log10(sizes), bins=bins)[0]
# log10 of empty bins gives -inf (warnings are suppressed above); non-finite
# entries are masked with np.isfinite(n_log) downstream.
n_log = np.log10(n)
In [60]:
# quicklook
plt.loglog(10**binmids[np.isfinite(n_log)], n[np.isfinite(n_log)], ls="None", marker="o")
Out[60]:
double power law fit: scaling parameters¶
In [61]:
# double power law
# Fit two straight lines in log-log space (linfit2 comes from utils; its exact
# parameterization is not visible here). The first two finite bins are excluded
# from the fit — presumably affected by the detection limit near the minimum
# cloud size. TODO(review): confirm the [2:] cutoff choice.
popt, pcov = curve_fit(linfit2, binmids[np.isfinite(n_log)][2:],
n_log[np.isfinite(n_log)][2:])
In [62]:
#scale break: intersection of 2 linear slopes
dc = (popt[3] - popt[1]) / (popt[0] - popt[2])

# 1-sigma parameter uncertainties from the fit covariance.
perr = np.sqrt(np.diag(pcov))
# Order the two slopes steep-to-shallow and keep each uncertainty paired with
# its own slope. The original always took (perr[0], perr[2]) in parameter
# order, which mismatched the sorted slopes whenever popt[2] > popt[0].
steep, shallow = (0, 2) if popt[0] >= popt[2] else (2, 0)
finite = np.isfinite(n_log)
csd = {'y': 10**(n_log[finite]),
       'y_fit': 10**(linfit2(binmids[finite], *popt)),
       'binmids': 10**(binmids[finite]),
       'slopes': (popt[steep], popt[shallow]),
       'slopes_err': (perr[steep], perr[shallow]),
       'scalebreak': 10**(dc)}
In [95]:
# Cloud size distribution: observed counts, double power-law fit, and the
# fitted scale break (intersection of the two linear segments).
fig,ax = plt.subplots(figsize=ty.plots.figsize(10))
# data
ax.step(csd['binmids'], csd['y'])
#fit
ax.loglog(csd['binmids'], csd['y_fit'],
linestyle='-', marker=None, color="C3",
label='$slope_{1}=$'+str(np.round(csd['slopes'][0],2))+'\n'
+'$slope_{2}=$'+str(np.round(csd['slopes'][1],2)))
# scale break
ax.axvline(x=csd['scalebreak'], color="C3", ls='--',
label="scalebreak = {}".format(round(csd["scalebreak"], 3)))
ax.set_xlabel("Cloud area equivalent diameter / km")
ax.set_ylabel("Frequency")
ax.legend()
Out[95]:
In [96]:
# Export the cloud size distribution figure for the paper/post.
fig.savefig("aster_cloudsizedistribution.pdf", bbox_inches="tight")
Combine cloud sizes and their cloud top height¶
The cloud mask height retrieval only derives an estimate for a 90m pixel if all 36 15m sub-pixels are classified confidently cloudy. Therefore, about half of all clouds (> 4 pixels) on the 5th of February don't have a cloud top height estimate.
In [76]:
# Fraction of detected clouds without a valid cloud top height estimate
# (about half, per the note above).
np.sum(np.isnan(heights)) / heights.size
Out[76]:
In [74]:
# quicklook
plt.hist(heights, bins=30)
Out[74]:
In [97]:
fig, ax = plt.subplots()
ax.set_xscale('log')
# 2-d histogram of cloud size vs. cloud top height, restricted to clouds
# with a finite height estimate.
# NOTE(review): the size axis uses 10**binmids (bin midpoints) as bin *edges*;
# confirm this is intended rather than 10**bins.
ax.hist2d(sizes[np.isfinite(heights)], heights[np.isfinite(heights)],
bins=[10**binmids, np.arange(0, np.nanmax(heights), 0.1)])
ax.set_xlabel("cloud area equivalent diameter / km")
ax.set_ylabel("cloud top height / km")
Out[97]:
In [98]:
# Export the size-vs-height 2-d histogram figure.
fig.savefig("aster_2dhist_cloudsize_vs_cloudtopheight.pdf", bbox_inches="tight")
cloud clustering¶
In [5]:
# Organization index (Iorg) for each of the 17 scenes, in acquisition order.
plt.plot(cloudclustering)
Out[5]:
Compare images 5 and 11
In [12]:
# Side-by-side cloud masks for scenes 5 and 11 (0-based indices into files0205).
im = [5, 11]
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 7))
for ax, scene in zip((ax1, ax2), im):
    # NOTE(review): credentials are embedded in the URL; consider reading them
    # from an environment variable instead of hardcoding.
    url = f"http://eurec4a:barbados@observations.ipsl.fr/thredds/dodsC/EUREC4A/SATELLITES/TERRA/ASTER/nc/{files0205[scene]}"
    store = xr.backends.PydapDataStore.open(url)
    ds = xr.open_dataset(store)
    ds.cloudmask.plot(ax=ax)
Sun glint in image 11 leads to "false clouds" in the cloud mask such that the calculated Iorg of those does not reflect cloud spacing but rather retrieval noise.
In [ ]: