"""A module for converting numbers or color arguments to *RGB* or *RGBA*.*RGB* and *RGBA* are sequences of, respectively, 3 or 4 floats in therange 0-1.This module includes functions and classes for color specification conversions,and for mapping numbers to colors in a 1-D array of colors called a colormap.Mapping data onto colors using a colormap typically involves two steps: a dataarray is first mapped onto the range 0-1 using a subclass of `Normalize`,then this number is mapped to a color using a subclass of `Colormap`. Twosubclasses of `Colormap` provided here: `LinearSegmentedColormap`, which usespiecewise-linear interpolation to define colormaps, and `ListedColormap`, whichmakes a colormap from a list of colors... seealso:: :doc:`/tutorials/colors/colormap-manipulation` for examples of how to make colormaps and :doc:`/tutorials/colors/colormaps` for a list of built-in colormaps. :doc:`/tutorials/colors/colormapnorms` for more details about data normalization More colormaps are available at palettable_.The module also provides functions for checking whether an object can beinterpreted as a color (`is_color_like`), for converting such an objectto an RGBA tuple (`to_rgba`) or to an HTML-like hex string in the"#rrggbb" format (`to_hex`), and a sequence of colors to an (n, 4)RGBA array (`to_rgba_array`). Caching is used for efficiency.Colors that Matplotlib recognizes are listed at:doc:`/tutorials/colors/colors`... _palettable: https://jiffyclub.github.io/palettable/.. _xkcd color survey: https://xkcd.com/color/rgb/"""importbase64fromcollections.abcimportSized,Sequence,MappingimportfunctoolsimportimportlibimportinspectimportioimportitertoolsfromnumbersimportNumberimportrefromPILimportImagefromPIL.PngImagePluginimportPngInfoimportmatplotlibasmplimportnumpyasnpfrommatplotlibimport_api,_cm,cbook,scalefrom._color_dataimportBASE_COLORS,TABLEAU_COLORS,CSS4_COLORS,XKCD_COLORSclass_ColorMapping(dict):def__init__(self,mapping):super().__init__(mapping)self.cache={}def__setitem__(self,key,value):super().__setitem__(key,value)self.cache.clear()def__delitem__(self,key):super().__delitem__(key)self.cache.clear()_colors_full_map={}# Set by reverse priority order._colors_full_map.update(XKCD_COLORS)_colors_full_map.update({k.replace('grey','gray'):vfork,vinXKCD_COLORS.items()if'grey'ink})_colors_full_map.update(CSS4_COLORS)_colors_full_map.update(TABLEAU_COLORS)_colors_full_map.update({k.replace('gray','grey'):vfork,vinTABLEAU_COLORS.items()if'gray'ink})_colors_full_map.update(BASE_COLORS)_colors_full_map=_ColorMapping(_colors_full_map)_REPR_PNG_SIZE=(512,64)defget_named_colors_mapping():"""Return the global mapping of names to named colors."""return_colors_full_mapclassColorSequenceRegistry(Mapping):r""" Container for sequences of colors that are known to Matplotlib by name. The universal registry instance is `matplotlib.color_sequences`. There should be no need for users to instantiate `.ColorSequenceRegistry` themselves. Read access uses a dict-like interface mapping names to lists of colors:: import matplotlib as mpl cmap = mpl.color_sequences['tab10'] The returned lists are copies, so that their modification does not change the global definition of the color sequence. 
    Additional color sequences can be added via
    `.ColorSequenceRegistry.register`::

        mpl.color_sequences.register('rgb', ['r', 'g', 'b'])
    """

    _BUILTIN_COLOR_SEQUENCES = {
        'tab10': _cm._tab10_data,
        'tab20': _cm._tab20_data,
        'tab20b': _cm._tab20b_data,
        'tab20c': _cm._tab20c_data,
        'Pastel1': _cm._Pastel1_data,
        'Pastel2': _cm._Pastel2_data,
        'Paired': _cm._Paired_data,
        'Accent': _cm._Accent_data,
        'Dark2': _cm._Dark2_data,
        'Set1': _cm._Set1_data,
        'Set2': _cm._Set2_data,
        'Set3': _cm._Set3_data,
    }

    def __init__(self):
        self._color_sequences = {**self._BUILTIN_COLOR_SEQUENCES}

    def __getitem__(self, item):
        try:
            return list(self._color_sequences[item])
        except KeyError:
            raise KeyError(f"{item!r} is not a known color sequence name")

    def __iter__(self):
        return iter(self._color_sequences)

    def __len__(self):
        return len(self._color_sequences)

    def __str__(self):
        return ('ColorSequenceRegistry; available color sequences:\n' +
                ', '.join(f"'{name}'" for name in self))

    def register(self, name, color_list):
        """
        Register a new color sequence.

        The color sequence registry stores a copy of the given *color_list*,
        so that future changes to the original list do not affect the
        registered color sequence. Think of this as the registry taking a
        snapshot of *color_list* at registration.

        Parameters
        ----------
        name : str
            The name for the color sequence.
        color_list : list of colors
            An iterable returning valid Matplotlib colors when iterating over.
            Note however that the returned color sequence will always be a
            list regardless of the input type.
        """
        if name in self._BUILTIN_COLOR_SEQUENCES:
            raise ValueError(f"{name!r} is a reserved name for a builtin "
                             "color sequence")

        color_list = list(color_list)  # force copy and coerce type to list
        for color in color_list:
            try:
                to_rgba(color)
            except ValueError:
                raise ValueError(
                    f"{color!r} is not a valid color specification")

        self._color_sequences[name] = color_list

    def unregister(self, name):
        """
        Remove a sequence from the registry.

        You cannot remove built-in color sequences.

        If the name is not registered, returns with no error.
        """
        if name in self._BUILTIN_COLOR_SEQUENCES:
            raise ValueError(
                f"Cannot unregister builtin color sequence {name!r}")
        self._color_sequences.pop(name, None)


_color_sequences = ColorSequenceRegistry()


def _sanitize_extrema(ex):
    if ex is None:
        return ex
    try:
        ret = ex.item()
    except AttributeError:
        ret = float(ex)
    return ret


def _is_nth_color(c):
    """Return whether *c* can be interpreted as an item in the color cycle."""
    return isinstance(c, str) and re.match(r"\AC[0-9]+\Z", c)


def is_color_like(c):
    """Return whether *c* can be interpreted as an RGB(A) color."""
    # Special-case nth color syntax because it cannot be parsed during setup.
    if _is_nth_color(c):
        return True
    try:
        to_rgba(c)
    except ValueError:
        return False
    else:
        return True


def _has_alpha_channel(c):
    """Return whether *c* is a color with an alpha channel."""
    # 4-element sequences are interpreted as r, g, b, a
    return not isinstance(c, str) and len(c) == 4


def _check_color_like(**kwargs):
    """
    For each *key, value* pair in *kwargs*, check that *value* is color-like.
    """
    for k, v in kwargs.items():
        if not is_color_like(v):
            raise ValueError(f"{v!r} is not a valid value for {k}")


def same_color(c1, c2):
    """
    Return whether the colors *c1* and *c2* are the same.

    *c1*, *c2* can be single colors or lists/arrays of colors.
"""c1=to_rgba_array(c1)c2=to_rgba_array(c2)n1=max(c1.shape[0],1)# 'none' results in shape (0, 4), but is 1-elemn2=max(c2.shape[0],1)# 'none' results in shape (0, 4), but is 1-elemifn1!=n2:raiseValueError('Different number of elements passed.')# The following shape test is needed to correctly handle comparisons with# 'none', which results in a shape (0, 4) array and thus cannot be tested# via value comparison.returnc1.shape==c2.shapeand(c1==c2).all()defto_rgba(c,alpha=None):""" Convert *c* to an RGBA color. Parameters ---------- c : Matplotlib color or ``np.ma.masked`` alpha : float, optional If *alpha* is given, force the alpha value of the returned RGBA tuple to *alpha*. If None, the alpha value from *c* is used. If *c* does not have an alpha channel, then alpha defaults to 1. *alpha* is ignored for the color value ``"none"`` (case-insensitive), which always maps to ``(0, 0, 0, 0)``. Returns ------- tuple Tuple of floats ``(r, g, b, a)``, where each channel (red, green, blue, alpha) can assume values between 0 and 1. """# Special-case nth color syntax because it should not be cached.if_is_nth_color(c):prop_cycler=mpl.rcParams['axes.prop_cycle']colors=prop_cycler.by_key().get('color',['k'])c=colors[int(c[1:])%len(colors)]try:rgba=_colors_full_map.cache[c,alpha]except(KeyError,TypeError):# Not in cache, or unhashable.rgba=NoneifrgbaisNone:# Suppress exception chaining of cache lookup failure.rgba=_to_rgba_no_colorcycle(c,alpha)try:_colors_full_map.cache[c,alpha]=rgbaexceptTypeError:passreturnrgbadef_to_rgba_no_colorcycle(c,alpha=None):""" Convert *c* to an RGBA color, with no support for color-cycle syntax. If *alpha* is given, force the alpha value of the returned RGBA tuple to *alpha*. Otherwise, the alpha value from *c* is used, if it has alpha information, or defaults to 1. *alpha* is ignored for the color value ``"none"`` (case-insensitive), which always maps to ``(0, 0, 0, 0)``. """orig_c=cifcisnp.ma.masked:return(0.,0.,0.,0.)ifisinstance(c,str):ifc.lower()=="none":return(0.,0.,0.,0.)# Named color.try:# This may turn c into a non-string, so we check again below.c=_colors_full_map[c]exceptKeyError:iflen(orig_c)!=1:try:c=_colors_full_map[c.lower()]exceptKeyError:passifisinstance(c,str):# hex color in #rrggbb format.match=re.match(r"\A#[a-fA-F0-9]{6}\Z",c)ifmatch:return(tuple(int(n,16)/255fornin[c[1:3],c[3:5],c[5:7]])+(alphaifalphaisnotNoneelse1.,))# hex color in #rgb format, shorthand for #rrggbb.match=re.match(r"\A#[a-fA-F0-9]{3}\Z",c)ifmatch:return(tuple(int(n,16)/255fornin[c[1]*2,c[2]*2,c[3]*2])+(alphaifalphaisnotNoneelse1.,))# hex color with alpha in #rrggbbaa format.match=re.match(r"\A#[a-fA-F0-9]{8}\Z",c)ifmatch:color=[int(n,16)/255fornin[c[1:3],c[3:5],c[5:7],c[7:9]]]ifalphaisnotNone:color[-1]=alphareturntuple(color)# hex color with alpha in #rgba format, shorthand for #rrggbbaa.match=re.match(r"\A#[a-fA-F0-9]{4}\Z",c)ifmatch:color=[int(n,16)/255fornin[c[1]*2,c[2]*2,c[3]*2,c[4]*2]]ifalphaisnotNone:color[-1]=alphareturntuple(color)# string gray.try:c=float(c)exceptValueError:passelse:ifnot(0<=c<=1):raiseValueError(f"Invalid string grayscale value {orig_c!r}. 
"f"Value must be within 0-1 range")returnc,c,c,alphaifalphaisnotNoneelse1.raiseValueError(f"Invalid RGBA argument: {orig_c!r}")# turn 2-D array into 1-D arrayifisinstance(c,np.ndarray):ifc.ndim==2andc.shape[0]==1:c=c.reshape(-1)# tuple color.ifnotnp.iterable(c):raiseValueError(f"Invalid RGBA argument: {orig_c!r}")iflen(c)notin[3,4]:raiseValueError("RGBA sequence should have length 3 or 4")ifnotall(isinstance(x,Number)forxinc):# Checks that don't work: `map(float, ...)`, `np.array(..., float)` and# `np.array(...).astype(float)` would all convert "0.5" to 0.5.raiseValueError(f"Invalid RGBA argument: {orig_c!r}")# Return a tuple to prevent the cached value from being modified.c=tuple(map(float,c))iflen(c)==3andalphaisNone:alpha=1ifalphaisnotNone:c=c[:3]+(alpha,)ifany(elem<0orelem>1foreleminc):raiseValueError("RGBA values should be within 0-1 range")returncdefto_rgba_array(c,alpha=None):""" Convert *c* to a (n, 4) array of RGBA colors. Parameters ---------- c : Matplotlib color or array of colors If *c* is a masked array, an `~numpy.ndarray` is returned with a (0, 0, 0, 0) row for each masked value or row in *c*. alpha : float or sequence of floats, optional If *alpha* is given, force the alpha value of the returned RGBA tuple to *alpha*. If None, the alpha value from *c* is used. If *c* does not have an alpha channel, then alpha defaults to 1. *alpha* is ignored for the color value ``"none"`` (case-insensitive), which always maps to ``(0, 0, 0, 0)``. If *alpha* is a sequence and *c* is a single color, *c* will be repeated to match the length of *alpha*. Returns ------- array (n, 4) array of RGBA colors, where each channel (red, green, blue, alpha) can assume values between 0 and 1. """# Special-case inputs that are already arrays, for performance. (If the# array has the wrong kind or shape, raise the error during one-at-a-time# conversion.)ifnp.iterable(alpha):alpha=np.asarray(alpha).ravel()if(isinstance(c,np.ndarray)andc.dtype.kindin"if"andc.ndim==2andc.shape[1]in[3,4]):mask=c.mask.any(axis=1)ifnp.ma.is_masked(c)elseNonec=np.ma.getdata(c)ifnp.iterable(alpha):ifc.shape[0]==1andalpha.shape[0]>1:c=np.tile(c,(alpha.shape[0],1))elifc.shape[0]!=alpha.shape[0]:raiseValueError("The number of colors must match the number"" of alpha values if there are more than one"" of each.")ifc.shape[1]==3:result=np.column_stack([c,np.zeros(len(c))])result[:,-1]=alphaifalphaisnotNoneelse1.elifc.shape[1]==4:result=c.copy()ifalphaisnotNone:result[:,-1]=alphaifmaskisnotNone:result[mask]=0ifnp.any((result<0)|(result>1)):raiseValueError("RGBA values should be within 0-1 range")returnresult# Handle single values.# Note that this occurs *after* handling inputs that are already arrays, as# `to_rgba(c, alpha)` (below) is expensive for such inputs, due to the need# to format the array in the ValueError message(!).ifcbook._str_lower_equal(c,"none"):returnnp.zeros((0,4),float)try:ifnp.iterable(alpha):returnnp.array([to_rgba(c,a)forainalpha],float)else:returnnp.array([to_rgba(c,alpha)],float)except(ValueError,TypeError):passifisinstance(c,str):raiseValueError(f"{c!r} is not a valid color value.")iflen(c)==0:returnnp.zeros((0,4),float)# Quick path if the whole sequence can be directly converted to a numpy# array in one 
shot.ifisinstance(c,Sequence):lens={len(cc)ifisinstance(cc,(list,tuple))else-1forccinc}iflens=={3}:rgba=np.column_stack([c,np.ones(len(c))])eliflens=={4}:rgba=np.array(c)else:rgba=np.array([to_rgba(cc)forccinc])else:rgba=np.array([to_rgba(cc)forccinc])ifalphaisnotNone:rgba[:,3]=alphareturnrgbadefto_rgb(c):"""Convert *c* to an RGB color, silently dropping the alpha channel."""returnto_rgba(c)[:3]defto_hex(c,keep_alpha=False):""" Convert *c* to a hex color. Parameters ---------- c : :doc:`color </tutorials/colors/colors>` or `numpy.ma.masked` keep_alpha : bool, default: False If False, use the ``#rrggbb`` format, otherwise use ``#rrggbbaa``. Returns ------- str ``#rrggbb`` or ``#rrggbbaa`` hex color string """c=to_rgba(c)ifnotkeep_alpha:c=c[:3]return"#"+"".join(format(round(val*255),"02x")forvalinc)### Backwards-compatible color-conversion APIcnames=CSS4_COLORShexColorPattern=re.compile(r"\A#[a-fA-F0-9]{6}\Z")rgb2hex=to_hexhex2color=to_rgbclassColorConverter:""" A class only kept for backwards compatibility. Its functionality is entirely provided by module-level functions. """colors=_colors_full_mapcache=_colors_full_map.cacheto_rgb=staticmethod(to_rgb)to_rgba=staticmethod(to_rgba)to_rgba_array=staticmethod(to_rgba_array)colorConverter=ColorConverter()### End of backwards-compatible color-conversion APIdef_create_lookup_table(N,data,gamma=1.0):r""" Create an *N* -element 1D lookup table. This assumes a mapping :math:`f : [0, 1] \rightarrow [0, 1]`. The returned data is an array of N values :math:`y = f(x)` where x is sampled from [0, 1]. By default (*gamma* = 1) x is equidistantly sampled from [0, 1]. The *gamma* correction factor :math:`\gamma` distorts this equidistant sampling by :math:`x \rightarrow x^\gamma`. Parameters ---------- N : int The number of elements of the created lookup table; at least 1. data : (M, 3) array-like or callable Defines the mapping :math:`f`. If a (M, 3) array-like, the rows define values (x, y0, y1). The x values must start with x=0, end with x=1, and all x values be in increasing order. A value between :math:`x_i` and :math:`x_{i+1}` is mapped to the range :math:`y^1_{i-1} \ldots y^0_i` by linear interpolation. For the simple case of a y-continuous mapping, y0 and y1 are identical. The two values of y are to allow for discontinuous mapping functions. E.g. a sawtooth with a period of 0.2 and an amplitude of 1 would be:: [(0, 1, 0), (0.2, 1, 0), (0.4, 1, 0), ..., [(1, 1, 0)] In the special case of ``N == 1``, by convention the returned value is y0 for x == 1. If *data* is a callable, it must accept and return numpy arrays:: data(x : ndarray) -> ndarray and map values between 0 - 1 to 0 - 1. gamma : float Gamma correction factor for input distribution x of the mapping. See also https://en.wikipedia.org/wiki/Gamma_correction. Returns ------- array The lookup table where ``lut[x * (N-1)]`` gives the closest value for values of x between 0 and 1. Notes ----- This function is internally used for `.LinearSegmentedColormap`. 
"""ifcallable(data):xind=np.linspace(0,1,N)**gammalut=np.clip(np.array(data(xind),dtype=float),0,1)returnluttry:adata=np.array(data)exceptExceptionaserr:raiseTypeError("data must be convertible to an array")fromerr_api.check_shape((None,3),data=adata)x=adata[:,0]y0=adata[:,1]y1=adata[:,2]ifx[0]!=0.orx[-1]!=1.0:raiseValueError("data mapping points must start with x=0 and end with x=1")if(np.diff(x)<0).any():raiseValueError("data mapping points must have x in increasing order")# begin generation of lookup tableifN==1:# convention: use the y = f(x=1) value for a 1-element lookup tablelut=np.array(y0[-1])else:x=x*(N-1)xind=(N-1)*np.linspace(0,1,N)**gammaind=np.searchsorted(x,xind)[1:-1]distance=(xind[1:-1]-x[ind-1])/(x[ind]-x[ind-1])lut=np.concatenate([[y1[0]],distance*(y0[ind]-y1[ind-1])+y1[ind-1],[y0[-1]],])# ensure that the lut is confined to values between 0 and 1 by clipping itreturnnp.clip(lut,0.0,1.0)classColormap:""" Baseclass for all scalar to RGBA mappings. Typically, Colormap instances are used to convert data values (floats) from the interval ``[0, 1]`` to the RGBA color that the respective Colormap represents. For scaling of data into the ``[0, 1]`` interval see `matplotlib.colors.Normalize`. Subclasses of `matplotlib.cm.ScalarMappable` make heavy use of this ``data -> normalize -> map-to-color`` processing chain. """def__init__(self,name,N=256):""" Parameters ---------- name : str The name of the colormap. N : int The number of RGB quantization levels. """self.name=nameself.N=int(N)# ensure that N is always intself._rgba_bad=(0.0,0.0,0.0,0.0)# If bad, don't paint anything.self._rgba_under=Noneself._rgba_over=Noneself._i_under=self.Nself._i_over=self.N+1self._i_bad=self.N+2self._isinit=False#: When this colormap exists on a scalar mappable and colorbar_extend#: is not False, colorbar creation will pick up ``colorbar_extend`` as#: the default value for the ``extend`` keyword in the#: `matplotlib.colorbar.Colorbar` constructor.self.colorbar_extend=Falsedef__call__(self,X,alpha=None,bytes=False):""" Parameters ---------- X : float or int, `~numpy.ndarray` or scalar The data value(s) to convert to RGBA. For floats, *X* should be in the interval ``[0.0, 1.0]`` to return the RGBA values ``X*100`` percent along the Colormap line. For integers, *X* should be in the interval ``[0, Colormap.N)`` to return RGBA values *indexed* from the Colormap with index ``X``. alpha : float or array-like or None Alpha must be a scalar between 0 and 1, a sequence of such floats with shape matching X, or None. bytes : bool If False (default), the returned RGBA values will be floats in the interval ``[0, 1]`` otherwise they will be uint8s in the interval ``[0, 255]``. Returns ------- Tuple of RGBA values if X is scalar, otherwise an array of RGBA values with a shape of ``X.shape + (4, )``. 
"""ifnotself._isinit:self._init()# Take the bad mask from a masked array, or in all other cases defer# np.isnan() to after we have converted to an array.mask_bad=X.maskifnp.ma.is_masked(X)elseNonexa=np.array(X,copy=True)ifmask_badisNone:mask_bad=np.isnan(xa)ifnotxa.dtype.isnative:xa=xa.byteswap().newbyteorder()# Native byteorder is faster.ifxa.dtype.kind=="f":xa*=self.N# Negative values are out of range, but astype(int) would# truncate them towards zero.xa[xa<0]=-1# xa == 1 (== N after multiplication) is not out of range.xa[xa==self.N]=self.N-1# Avoid converting large positive values to negative integers.np.clip(xa,-1,self.N,out=xa)withnp.errstate(invalid="ignore"):# We need this cast for unsigned ints as well as floatsxa=xa.astype(int)# Set the over-range indices before the under-range;# otherwise the under-range values get converted to over-range.xa[xa>self.N-1]=self._i_overxa[xa<0]=self._i_underxa[mask_bad]=self._i_badlut=self._lutifbytes:lut=(lut*255).astype(np.uint8)rgba=lut.take(xa,axis=0,mode='clip')ifalphaisnotNone:alpha=np.clip(alpha,0,1)ifbytes:alpha*=255# Will be cast to uint8 upon assignment.ifalpha.shapenotin[(),xa.shape]:raiseValueError(f"alpha is array-like but its shape {alpha.shape} does "f"not match that of X {xa.shape}")rgba[...,-1]=alpha# If the "bad" color is all zeros, then ignore alpha input.if(lut[-1]==0).all()andnp.any(mask_bad):ifnp.iterable(mask_bad)andmask_bad.shape==xa.shape:rgba[mask_bad]=(0,0,0,0)else:rgba[...,:]=(0,0,0,0)ifnotnp.iterable(X):rgba=tuple(rgba)returnrgbadef__copy__(self):cls=self.__class__cmapobject=cls.__new__(cls)cmapobject.__dict__.update(self.__dict__)ifself._isinit:cmapobject._lut=np.copy(self._lut)returncmapobjectdef__eq__(self,other):if(notisinstance(other,Colormap)orself.name!=other.nameorself.colorbar_extend!=other.colorbar_extend):returnFalse# To compare lookup tables the Colormaps have to be initializedifnotself._isinit:self._init()ifnotother._isinit:other._init()returnnp.array_equal(self._lut,other._lut)
    def get_bad(self):
        """Get the color for masked values."""
        if not self._isinit:
            self._init()
        return np.array(self._lut[self._i_bad])
    def set_bad(self, color='k', alpha=None):
        """Set the color for masked values."""
        self._rgba_bad = to_rgba(color, alpha)
        if self._isinit:
            self._set_extremes()
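    # Illustrative sketch: the "bad" color is what masked or NaN entries map
    # to.  Assumes 'viridis' is registered; copying keeps the global colormap
    # unchanged.
    #
    #     >>> import numpy as np
    #     >>> import matplotlib as mpl
    #     >>> cmap = mpl.colormaps['viridis'].copy()
    #     >>> cmap.set_bad('red')
    #     >>> cmap(np.nan)           # -> (1.0, 0.0, 0.0, 1.0)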
    def get_under(self):
        """Get the color for low out-of-range values."""
        if not self._isinit:
            self._init()
        return np.array(self._lut[self._i_under])
    def set_under(self, color='k', alpha=None):
        """Set the color for low out-of-range values."""
        self._rgba_under = to_rgba(color, alpha)
        if self._isinit:
            self._set_extremes()
    def get_over(self):
        """Get the color for high out-of-range values."""
        if not self._isinit:
            self._init()
        return np.array(self._lut[self._i_over])
    def set_over(self, color='k', alpha=None):
        """Set the color for high out-of-range values."""
        self._rgba_over = to_rgba(color, alpha)
        if self._isinit:
            self._set_extremes()
    def set_extremes(self, *, bad=None, under=None, over=None):
        """
        Set the colors for masked (*bad*) values and, when ``norm.clip =
        False``, low (*under*) and high (*over*) out-of-range values.
        """
        if bad is not None:
            self.set_bad(bad)
        if under is not None:
            self.set_under(under)
        if over is not None:
            self.set_over(over)
    def with_extremes(self, *, bad=None, under=None, over=None):
        """
        Return a copy of the colormap, for which the colors for masked
        (*bad*) values and, when ``norm.clip = False``, low (*under*) and
        high (*over*) out-of-range values, have been set accordingly.
        """
        new_cm = self.copy()
        new_cm.set_extremes(bad=bad, under=under, over=over)
        return new_cm
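    # Illustrative sketch: ``with_extremes`` returns a modified copy, so the
    # original colormap keeps its extreme colors.  The colors below are
    # arbitrary example values; assumes 'viridis' is registered.
    #
    #     >>> import matplotlib as mpl
    #     >>> cmap = mpl.colormaps['viridis'].with_extremes(
    #     ...     bad='gray', under='blue', over='red')
    #     >>> cmap(-0.1)             # under color
    #     >>> cmap(1.1)              # over color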
    def _set_extremes(self):
        if self._rgba_under:
            self._lut[self._i_under] = self._rgba_under
        else:
            self._lut[self._i_under] = self._lut[0]
        if self._rgba_over:
            self._lut[self._i_over] = self._rgba_over
        else:
            self._lut[self._i_over] = self._lut[self.N - 1]
        self._lut[self._i_bad] = self._rgba_bad

    def _init(self):
        """Generate the lookup table, ``self._lut``."""
        raise NotImplementedError("Abstract class only")
    def is_gray(self):
        """Return whether the colormap is grayscale."""
        if not self._isinit:
            self._init()
        return (np.all(self._lut[:, 0] == self._lut[:, 1]) and
                np.all(self._lut[:, 0] == self._lut[:, 2]))
    def resampled(self, lutsize):
        """Return a new colormap with *lutsize* entries."""
        if hasattr(self, '_resample'):
            _api.warn_external(
                "The ability to resample a color map is now public API. "
                f"However the class {type(self)} still only implements "
                "the previous private _resample method.  Please update "
                "your class.")
            return self._resample(lutsize)
        raise NotImplementedError()
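    # Illustrative sketch: ``resampled`` rebuilds the lookup table with the
    # requested number of entries, e.g. for a discrete-looking version of a
    # continuous colormap.  Assumes 'viridis' is registered.
    #
    #     >>> import matplotlib as mpl
    #     >>> mpl.colormaps['viridis'].resampled(8).N
    #     8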
    def reversed(self, name=None):
        """
        Return a reversed instance of the Colormap.

        .. note:: This function is not implemented for the base class.

        Parameters
        ----------
        name : str, optional
            The name for the reversed colormap. If None, the
            name is set to ``self.name + "_r"``.

        See Also
        --------
        LinearSegmentedColormap.reversed
        ListedColormap.reversed
        """
        raise NotImplementedError()

    def _repr_png_(self):
        """Generate a PNG representation of the Colormap."""
        X = np.tile(np.linspace(0, 1, _REPR_PNG_SIZE[0]),
                    (_REPR_PNG_SIZE[1], 1))
        pixels = self(X, bytes=True)
        png_bytes = io.BytesIO()
        title = self.name + ' colormap'
        author = f'Matplotlib v{mpl.__version__}, https://matplotlib.org'
        pnginfo = PngInfo()
        pnginfo.add_text('Title', title)
        pnginfo.add_text('Description', title)
        pnginfo.add_text('Author', author)
        pnginfo.add_text('Software', author)
        Image.fromarray(pixels).save(png_bytes, format='png', pnginfo=pnginfo)
        return png_bytes.getvalue()

    def _repr_html_(self):
        """Generate an HTML representation of the Colormap."""
        png_bytes = self._repr_png_()
        png_base64 = base64.b64encode(png_bytes).decode('ascii')

        def color_block(color):
            hex_color = to_hex(color, keep_alpha=True)
            return (f'<div title="{hex_color}" '
                    'style="display: inline-block; '
                    'width: 1em; height: 1em; '
                    'margin: 0; '
                    'vertical-align: middle; '
                    'border: 1px solid #555; '
                    f'background-color: {hex_color};"></div>')

        return ('<div style="vertical-align: middle;">'
                f'<strong>{self.name}</strong> '
                '</div>'
                '<div class="cmap"><img '
                f'alt="{self.name} colormap" '
                f'title="{self.name}" '
                'style="border: 1px solid #555;" '
                f'src="data:image/png;base64,{png_base64}"></div>'
                '<div style="vertical-align: middle; '
                f'max-width: {_REPR_PNG_SIZE[0]+2}px; '
                'display: flex; justify-content: space-between;">'
                '<div style="float: left;">'
                f'{color_block(self.get_under())} under'
                '</div>'
                '<div style="margin: 0 auto; display: inline-block;">'
                f'bad {color_block(self.get_bad())}'
                '</div>'
                '<div style="float: right;">'
                f'over {color_block(self.get_over())}'
                '</div>')
    def copy(self):
        """Return a copy of the colormap."""
        return self.__copy__()
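    # Illustrative sketch: take a copy before customizing, so the globally
    # registered colormap is left untouched.  Assumes 'viridis' is
    # registered.
    #
    #     >>> import matplotlib as mpl
    #     >>> my_cmap = mpl.colormaps['viridis'].copy()
    #     >>> my_cmap.set_over('white')   # affects only the copy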
classLinearSegmentedColormap(Colormap):""" Colormap objects based on lookup tables using linear segments. The lookup table is generated using linear interpolation for each primary color, with the 0-1 domain divided into any number of segments. """def__init__(self,name,segmentdata,N=256,gamma=1.0):""" Create colormap from linear mapping segments segmentdata argument is a dictionary with a red, green and blue entries. Each entry should be a list of *x*, *y0*, *y1* tuples, forming rows in a table. Entries for alpha are optional. Example: suppose you want red to increase from 0 to 1 over the bottom half, green to do the same over the middle half, and blue over the top half. Then you would use:: cdict = {'red': [(0.0, 0.0, 0.0), (0.5, 1.0, 1.0), (1.0, 1.0, 1.0)], 'green': [(0.0, 0.0, 0.0), (0.25, 0.0, 0.0), (0.75, 1.0, 1.0), (1.0, 1.0, 1.0)], 'blue': [(0.0, 0.0, 0.0), (0.5, 0.0, 0.0), (1.0, 1.0, 1.0)]} Each row in the table for a given color is a sequence of *x*, *y0*, *y1* tuples. In each sequence, *x* must increase monotonically from 0 to 1. For any input value *z* falling between *x[i]* and *x[i+1]*, the output value of a given color will be linearly interpolated between *y1[i]* and *y0[i+1]*:: row i: x y0 y1 / / row i+1: x y0 y1 Hence y0 in the first row and y1 in the last row are never used. See Also -------- LinearSegmentedColormap.from_list Static method; factory function for generating a smoothly-varying LinearSegmentedColormap. """# True only if all colors in map are identical; needed for contouring.self.monochrome=Falsesuper().__init__(name,N)self._segmentdata=segmentdataself._gamma=gammadef_init(self):self._lut=np.ones((self.N+3,4),float)self._lut[:-3,0]=_create_lookup_table(self.N,self._segmentdata['red'],self._gamma)self._lut[:-3,1]=_create_lookup_table(self.N,self._segmentdata['green'],self._gamma)self._lut[:-3,2]=_create_lookup_table(self.N,self._segmentdata['blue'],self._gamma)if'alpha'inself._segmentdata:self._lut[:-3,3]=_create_lookup_table(self.N,self._segmentdata['alpha'],1)self._isinit=Trueself._set_extremes()defset_gamma(self,gamma):"""Set a new gamma value and regenerate colormap."""self._gamma=gammaself._init()@staticmethoddeffrom_list(name,colors,N=256,gamma=1.0):""" Create a `LinearSegmentedColormap` from a list of colors. Parameters ---------- name : str The name of the colormap. colors : array-like of colors or array-like of (value, color) If only colors are given, they are equidistantly mapped from the range :math:`[0, 1]`; i.e. 0 maps to ``colors[0]`` and 1 maps to ``colors[-1]``. If (value, color) pairs are given, the mapping is from *value* to *color*. This can be used to divide the range unevenly. N : int The number of RGB quantization levels. 
gamma : float """ifnotnp.iterable(colors):raiseValueError('colors must be iterable')if(isinstance(colors[0],Sized)andlen(colors[0])==2andnotisinstance(colors[0],str)):# List of value, color pairsvals,colors=zip(*colors)else:vals=np.linspace(0,1,len(colors))r,g,b,a=to_rgba_array(colors).Tcdict={"red":np.column_stack([vals,r,r]),"green":np.column_stack([vals,g,g]),"blue":np.column_stack([vals,b,b]),"alpha":np.column_stack([vals,a,a]),}returnLinearSegmentedColormap(name,cdict,N,gamma)defresampled(self,lutsize):"""Return a new colormap with *lutsize* entries."""new_cmap=LinearSegmentedColormap(self.name,self._segmentdata,lutsize)new_cmap._rgba_over=self._rgba_overnew_cmap._rgba_under=self._rgba_undernew_cmap._rgba_bad=self._rgba_badreturnnew_cmap# Helper ensuring picklability of the reversed cmap.@staticmethoddef_reverser(func,x):returnfunc(1-x)defreversed(self,name=None):""" Return a reversed instance of the Colormap. Parameters ---------- name : str, optional The name for the reversed colormap. If None, the name is set to ``self.name + "_r"``. Returns ------- LinearSegmentedColormap The reversed colormap. """ifnameisNone:name=self.name+"_r"# Using a partial object keeps the cmap picklable.data_r={key:(functools.partial(self._reverser,data)ifcallable(data)else[(1.0-x,y1,y0)forx,y0,y1inreversed(data)])forkey,datainself._segmentdata.items()}new_cmap=LinearSegmentedColormap(name,data_r,self.N,self._gamma)# Reverse the over/under values toonew_cmap._rgba_over=self._rgba_undernew_cmap._rgba_under=self._rgba_overnew_cmap._rgba_bad=self._rgba_badreturnnew_cmapclassListedColormap(Colormap):""" Colormap object generated from a list of colors. This may be most useful when indexing directly into a colormap, but it can also be used to generate special colormaps for ordinary mapping. Parameters ---------- colors : list, array List of Matplotlib color specifications, or an equivalent Nx3 or Nx4 floating point array (*N* RGB or RGBA values). name : str, optional String to identify the colormap. N : int, optional Number of entries in the map. The default is *None*, in which case there is one colormap entry for each element in the list of colors. If :: N < len(colors) the list will be truncated at *N*. If :: N > len(colors) the list will be extended by repetition. """def__init__(self,colors,name='from_list',N=None):self.monochrome=False# Are all colors identical? (for contour.py)ifNisNone:self.colors=colorsN=len(colors)else:ifisinstance(colors,str):self.colors=[colors]*Nself.monochrome=Trueelifnp.iterable(colors):iflen(colors)==1:self.monochrome=Trueself.colors=list(itertools.islice(itertools.cycle(colors),N))else:try:gray=float(colors)exceptTypeError:passelse:self.colors=[gray]*Nself.monochrome=Truesuper().__init__(name,N)def_init(self):self._lut=np.zeros((self.N+3,4),float)self._lut[:-3]=to_rgba_array(self.colors)self._isinit=Trueself._set_extremes()defresampled(self,lutsize):"""Return a new colormap with *lutsize* entries."""colors=self(np.linspace(0,1,lutsize))new_cmap=ListedColormap(colors,name=self.name)# Keep the over/under values toonew_cmap._rgba_over=self._rgba_overnew_cmap._rgba_under=self._rgba_undernew_cmap._rgba_bad=self._rgba_badreturnnew_cmapdefreversed(self,name=None):""" Return a reversed instance of the Colormap. Parameters ---------- name : str, optional The name for the reversed colormap. If None, the name is set to ``self.name + "_r"``. Returns ------- ListedColormap A reversed instance of the colormap. 
"""ifnameisNone:name=self.name+"_r"colors_r=list(reversed(self.colors))new_cmap=ListedColormap(colors_r,name=name,N=self.N)# Reverse the over/under values toonew_cmap._rgba_over=self._rgba_undernew_cmap._rgba_under=self._rgba_overnew_cmap._rgba_bad=self._rgba_badreturnnew_cmapclassNormalize:""" A class which, when called, linearly normalizes data into the ``[0.0, 1.0]`` interval. """def__init__(self,vmin=None,vmax=None,clip=False):""" Parameters ---------- vmin, vmax : float or None If *vmin* and/or *vmax* is not given, they are initialized from the minimum and maximum value, respectively, of the first input processed; i.e., ``__call__(A)`` calls ``autoscale_None(A)``. clip : bool, default: False If ``True`` values falling outside the range ``[vmin, vmax]``, are mapped to 0 or 1, whichever is closer, and masked values are set to 1. If ``False`` masked values remain masked. Clipping silently defeats the purpose of setting the over, under, and masked colors in a colormap, so it is likely to lead to surprises; therefore the default is ``clip=False``. Notes ----- Returns 0 if ``vmin == vmax``. """self._vmin=_sanitize_extrema(vmin)self._vmax=_sanitize_extrema(vmax)self._clip=clipself._scale=Noneself.callbacks=cbook.CallbackRegistry(signals=["changed"])@propertydefvmin(self):returnself._vmin@vmin.setterdefvmin(self,value):value=_sanitize_extrema(value)ifvalue!=self._vmin:self._vmin=valueself._changed()@propertydefvmax(self):returnself._vmax@vmax.setterdefvmax(self,value):value=_sanitize_extrema(value)ifvalue!=self._vmax:self._vmax=valueself._changed()@propertydefclip(self):returnself._clip@clip.setterdefclip(self,value):ifvalue!=self._clip:self._clip=valueself._changed()def_changed(self):""" Call this whenever the norm is changed to notify all the callback listeners to the 'changed' signal. """self.callbacks.process('changed')@staticmethoddefprocess_value(value):""" Homogenize the input *value* for easy and efficient normalization. *value* can be a scalar or sequence. Returns ------- result : masked array Masked array with the same shape as *value*. is_scalar : bool Whether *value* is a scalar. Notes ----- Float dtypes are preserved; integer types with two bytes or smaller are converted to np.float32, and larger types are converted to np.float64. Preserving float32 when possible, and using in-place operations, greatly improves speed for large arrays. """is_scalar=notnp.iterable(value)ifis_scalar:value=[value]dtype=np.min_scalar_type(value)ifnp.issubdtype(dtype,np.integer)ordtype.typeisnp.bool_:# bool_/int8/int16 -> float32; int32/int64 -> float64dtype=np.promote_types(dtype,np.float32)# ensure data passed in as an ndarray subclass are interpreted as# an ndarray. See issue #6622.mask=np.ma.getmask(value)data=np.asarray(value)result=np.ma.array(data,mask=mask,dtype=dtype,copy=True)returnresult,is_scalardef__call__(self,value,clip=None):""" Normalize *value* data in the ``[vmin, vmax]`` interval into the ``[0.0, 1.0]`` interval and return it. Parameters ---------- value Data to normalize. clip : bool, optional If ``None``, defaults to ``self.clip`` (which defaults to ``False``). Notes ----- If not already initialized, ``self.vmin`` and ``self.vmax`` are initialized using ``self.autoscale_None(value)``. 
"""ifclipisNone:clip=self.clipresult,is_scalar=self.process_value(value)ifself.vminisNoneorself.vmaxisNone:self.autoscale_None(result)# Convert at least to float, without losing precision.(vmin,),_=self.process_value(self.vmin)(vmax,),_=self.process_value(self.vmax)ifvmin==vmax:result.fill(0)# Or should it be all masked? Or 0.5?elifvmin>vmax:raiseValueError("minvalue must be less than or equal to maxvalue")else:ifclip:mask=np.ma.getmask(result)result=np.ma.array(np.clip(result.filled(vmax),vmin,vmax),mask=mask)# ma division is very slow; we can take a shortcutresdat=result.dataresdat-=vminresdat/=(vmax-vmin)result=np.ma.array(resdat,mask=result.mask,copy=False)ifis_scalar:result=result[0]returnresultdefinverse(self,value):ifnotself.scaled():raiseValueError("Not invertible until both vmin and vmax are set")(vmin,),_=self.process_value(self.vmin)(vmax,),_=self.process_value(self.vmax)ifnp.iterable(value):val=np.ma.asarray(value)returnvmin+val*(vmax-vmin)else:returnvmin+value*(vmax-vmin)defautoscale(self,A):"""Set *vmin*, *vmax* to min, max of *A*."""withself.callbacks.blocked():# Pause callbacks while we are updating so we only get# a single update signal at the endself.vmin=self.vmax=Noneself.autoscale_None(A)self._changed()defautoscale_None(self,A):"""If vmin or vmax are not set, use the min/max of *A* to set them."""A=np.asanyarray(A)ifself.vminisNoneandA.size:self.vmin=A.min()ifself.vmaxisNoneandA.size:self.vmax=A.max()defscaled(self):"""Return whether vmin and vmax are set."""returnself.vminisnotNoneandself.vmaxisnotNoneclassTwoSlopeNorm(Normalize):def__init__(self,vcenter,vmin=None,vmax=None):""" Normalize data with a set center. Useful when mapping data with an unequal rates of change around a conceptual center, e.g., data that range from -2 to 4, with 0 as the midpoint. Parameters ---------- vcenter : float The data value that defines ``0.5`` in the normalization. vmin : float, optional The data value that defines ``0.0`` in the normalization. Defaults to the min value of the dataset. vmax : float, optional The data value that defines ``1.0`` in the normalization. Defaults to the max value of the dataset. Examples -------- This maps data value -4000 to 0., 0 to 0.5, and +10000 to 1.0; data between is linearly interpolated:: >>> import matplotlib.colors as mcolors >>> offset = mcolors.TwoSlopeNorm(vmin=-4000., vcenter=0., vmax=10000) >>> data = [-4000., -2000., 0., 2500., 5000., 7500., 10000.] >>> offset(data) array([0., 0.25, 0.5, 0.625, 0.75, 0.875, 1.0]) """super().__init__(vmin=vmin,vmax=vmax)self._vcenter=vcenterifvcenterisnotNoneandvmaxisnotNoneandvcenter>=vmax:raiseValueError('vmin, vcenter, and vmax must be in ''ascending order')ifvcenterisnotNoneandvminisnotNoneandvcenter<=vmin:raiseValueError('vmin, vcenter, and vmax must be in ''ascending order')@propertydefvcenter(self):returnself._vcenter@vcenter.setterdefvcenter(self,value):ifvalue!=self._vcenter:self._vcenter=valueself._changed()defautoscale_None(self,A):""" Get vmin and vmax, and then clip at vcenter """super().autoscale_None(A)ifself.vmin>self.vcenter:self.vmin=self.vcenterifself.vmax<self.vcenter:self.vmax=self.vcenterdef__call__(self,value,clip=None):""" Map value to the interval [0, 1]. The *clip* argument is unused. 
"""result,is_scalar=self.process_value(value)self.autoscale_None(result)# sets self.vmin, self.vmax if Noneifnotself.vmin<=self.vcenter<=self.vmax:raiseValueError("vmin, vcenter, vmax must increase monotonically")# note that we must extrapolate for tick locators:result=np.ma.masked_array(np.interp(result,[self.vmin,self.vcenter,self.vmax],[0,0.5,1],left=-np.inf,right=np.inf),mask=np.ma.getmask(result))ifis_scalar:result=np.atleast_1d(result)[0]returnresultdefinverse(self,value):ifnotself.scaled():raiseValueError("Not invertible until both vmin and vmax are set")(vmin,),_=self.process_value(self.vmin)(vmax,),_=self.process_value(self.vmax)(vcenter,),_=self.process_value(self.vcenter)result=np.interp(value,[0,0.5,1],[vmin,vcenter,vmax],left=-np.inf,right=np.inf)returnresultclassCenteredNorm(Normalize):def__init__(self,vcenter=0,halfrange=None,clip=False):""" Normalize symmetrical data around a center (0 by default). Unlike `TwoSlopeNorm`, `CenteredNorm` applies an equal rate of change around the center. Useful when mapping symmetrical data around a conceptual center e.g., data that range from -2 to 4, with 0 as the midpoint, and with equal rates of change around that midpoint. Parameters ---------- vcenter : float, default: 0 The data value that defines ``0.5`` in the normalization. halfrange : float, optional The range of data values that defines a range of ``0.5`` in the normalization, so that *vcenter* - *halfrange* is ``0.0`` and *vcenter* + *halfrange* is ``1.0`` in the normalization. Defaults to the largest absolute difference to *vcenter* for the values in the dataset. clip : bool, default: False If ``True`` values falling outside the range ``[vmin, vmax]``, are mapped to 0 or 1, whichever is closer, and masked values are set to 1. If ``False`` masked values remain masked. Examples -------- This maps data values -2 to 0.25, 0 to 0.5, and 4 to 1.0 (assuming equal rates of change above and below 0.0): >>> import matplotlib.colors as mcolors >>> norm = mcolors.CenteredNorm(halfrange=4.0) >>> data = [-2., 0., 4.] >>> norm(data) array([0.25, 0.5 , 1. ]) """super().__init__(vmin=None,vmax=None,clip=clip)self._vcenter=vcenter# calling the halfrange setter to set vmin and vmaxself.halfrange=halfrangedefautoscale(self,A):""" Set *halfrange* to ``max(abs(A-vcenter))``, then set *vmin* and *vmax*. 
"""A=np.asanyarray(A)self.halfrange=max(self._vcenter-A.min(),A.max()-self._vcenter)defautoscale_None(self,A):"""Set *vmin* and *vmax*."""A=np.asanyarray(A)ifself.halfrangeisNoneandA.size:self.autoscale(A)@propertydefvmin(self):returnself._vmin@vmin.setterdefvmin(self,value):value=_sanitize_extrema(value)ifvalue!=self._vmin:self._vmin=valueself._vmax=2*self.vcenter-valueself._changed()@propertydefvmax(self):returnself._vmax@vmax.setterdefvmax(self,value):value=_sanitize_extrema(value)ifvalue!=self._vmax:self._vmax=valueself._vmin=2*self.vcenter-valueself._changed()@propertydefvcenter(self):returnself._vcenter@vcenter.setterdefvcenter(self,vcenter):ifvcenter!=self._vcenter:self._vcenter=vcenter# Trigger an update of the vmin/vmax values through the setterself.halfrange=self.halfrangeself._changed()@propertydefhalfrange(self):ifself.vminisNoneorself.vmaxisNone:returnNonereturn(self.vmax-self.vmin)/2@halfrange.setterdefhalfrange(self,halfrange):ifhalfrangeisNone:self.vmin=Noneself.vmax=Noneelse:self.vmin=self.vcenter-abs(halfrange)self.vmax=self.vcenter+abs(halfrange)defmake_norm_from_scale(scale_cls,base_norm_cls=None,*,init=None):""" Decorator for building a `.Normalize` subclass from a `~.scale.ScaleBase` subclass. After :: @make_norm_from_scale(scale_cls) class norm_cls(Normalize): ... *norm_cls* is filled with methods so that normalization computations are forwarded to *scale_cls* (i.e., *scale_cls* is the scale that would be used for the colorbar of a mappable normalized with *norm_cls*). If *init* is not passed, then the constructor signature of *norm_cls* will be ``norm_cls(vmin=None, vmax=None, clip=False)``; these three parameters will be forwarded to the base class (``Normalize.__init__``), and a *scale_cls* object will be initialized with no arguments (other than a dummy axis). If the *scale_cls* constructor takes additional parameters, then *init* should be passed to `make_norm_from_scale`. It is a callable which is *only* used for its signature. First, this signature will become the signature of *norm_cls*. Second, the *norm_cls* constructor will bind the parameters passed to it using this signature, extract the bound *vmin*, *vmax*, and *clip* values, pass those to ``Normalize.__init__``, and forward the remaining bound values (including any defaults defined by the signature) to the *scale_cls* constructor. """ifbase_norm_clsisNone:returnfunctools.partial(make_norm_from_scale,scale_cls,init=init)ifisinstance(scale_cls,functools.partial):scale_args=scale_cls.argsscale_kwargs_items=tuple(scale_cls.keywords.items())scale_cls=scale_cls.funcelse:scale_args=scale_kwargs_items=()ifinitisNone:definit(vmin=None,vmax=None,clip=False):passreturn_make_norm_from_scale(scale_cls,scale_args,scale_kwargs_items,base_norm_cls,inspect.signature(init))@functools.lru_cache(None)def_make_norm_from_scale(scale_cls,scale_args,scale_kwargs_items,base_norm_cls,bound_init_signature,):""" Helper for `make_norm_from_scale`. This function is split out to enable caching (in particular so that different unpickles reuse the same class). In order to do so, - ``functools.partial`` *scale_cls* is expanded into ``func, args, kwargs`` to allow memoizing returned norms (partial instances always compare unequal, but we can check identity based on ``func, args, kwargs``; - *init* is replaced by *init_signature*, as signatures are picklable, unlike to arbitrary lambdas. """classNorm(base_norm_cls):def__reduce__(self):cls=type(self)# If the class is toplevel-accessible, it is possible to directly# pickle it "by name". 
This is required to support norm classes# defined at a module's toplevel, as the inner base_norm_cls is# otherwise unpicklable (as it gets shadowed by the generated norm# class). If either import or attribute access fails, fall back to# the general path.try:ifclsisgetattr(importlib.import_module(cls.__module__),cls.__qualname__):return(_create_empty_object_of_class,(cls,),vars(self))except(ImportError,AttributeError):passreturn(_picklable_norm_constructor,(scale_cls,scale_args,scale_kwargs_items,base_norm_cls,bound_init_signature),vars(self))def__init__(self,*args,**kwargs):ba=bound_init_signature.bind(*args,**kwargs)ba.apply_defaults()super().__init__(**{k:ba.arguments.pop(k)forkin["vmin","vmax","clip"]})self._scale=functools.partial(scale_cls,*scale_args,**dict(scale_kwargs_items))(axis=None,**ba.arguments)self._trf=self._scale.get_transform()__init__.__signature__=bound_init_signature.replace(parameters=[inspect.Parameter("self",inspect.Parameter.POSITIONAL_OR_KEYWORD),*bound_init_signature.parameters.values()])def__call__(self,value,clip=None):value,is_scalar=self.process_value(value)ifself.vminisNoneorself.vmaxisNone:self.autoscale_None(value)ifself.vmin>self.vmax:raiseValueError("vmin must be less or equal to vmax")ifself.vmin==self.vmax:returnnp.full_like(value,0)ifclipisNone:clip=self.clipifclip:value=np.clip(value,self.vmin,self.vmax)t_value=self._trf.transform(value).reshape(np.shape(value))t_vmin,t_vmax=self._trf.transform([self.vmin,self.vmax])ifnotnp.isfinite([t_vmin,t_vmax]).all():raiseValueError("Invalid vmin or vmax")t_value-=t_vmint_value/=(t_vmax-t_vmin)t_value=np.ma.masked_invalid(t_value,copy=False)returnt_value[0]ifis_scalarelset_valuedefinverse(self,value):ifnotself.scaled():raiseValueError("Not invertible until scaled")ifself.vmin>self.vmax:raiseValueError("vmin must be less or equal to vmax")t_vmin,t_vmax=self._trf.transform([self.vmin,self.vmax])ifnotnp.isfinite([t_vmin,t_vmax]).all():raiseValueError("Invalid vmin or vmax")value,is_scalar=self.process_value(value)rescaled=value*(t_vmax-t_vmin)rescaled+=t_vminvalue=(self._trf.inverted().transform(rescaled).reshape(np.shape(value)))returnvalue[0]ifis_scalarelsevaluedefautoscale_None(self,A):# i.e. A[np.isfinite(...)], but also for non-array A'sin_trf_domain=np.extract(np.isfinite(self._trf.transform(A)),A)ifin_trf_domain.size==0:in_trf_domain=np.ma.maskedreturnsuper().autoscale_None(in_trf_domain)ifbase_norm_clsisNormalize:Norm.__name__=f"{scale_cls.__name__}Norm"Norm.__qualname__=f"{scale_cls.__qualname__}Norm"else:Norm.__name__=base_norm_cls.__name__Norm.__qualname__=base_norm_cls.__qualname__Norm.__module__=base_norm_cls.__module__Norm.__doc__=base_norm_cls.__doc__returnNormdef_create_empty_object_of_class(cls):returncls.__new__(cls)def_picklable_norm_constructor(*args):return_create_empty_object_of_class(_make_norm_from_scale(*args))@make_norm_from_scale(scale.FuncScale,init=lambdafunctions,vmin=None,vmax=None,clip=False:None)classFuncNorm(Normalize):""" Arbitrary normalization using functions for the forward and inverse. Parameters ---------- functions : (callable, callable) two-tuple of the forward and inverse functions for the normalization. The forward function must be monotonic. Both functions must have the signature :: def forward(values: array-like) -> array-like vmin, vmax : float or None If *vmin* and/or *vmax* is not given, they are initialized from the minimum and maximum value, respectively, of the first input processed; i.e., ``__call__(A)`` calls ``autoscale_None(A)``. 
clip : bool, default: False If ``True`` values falling outside the range ``[vmin, vmax]``, are mapped to 0 or 1, whichever is closer, and masked values are set to 1. If ``False`` masked values remain masked. Clipping silently defeats the purpose of setting the over, under, and masked colors in a colormap, so it is likely to lead to surprises; therefore the default is ``clip=False``. """LogNorm=make_norm_from_scale(functools.partial(scale.LogScale,nonpositive="mask"))(Normalize)LogNorm.__name__=LogNorm.__qualname__="LogNorm"LogNorm.__doc__="Normalize a given value to the 0-1 range on a log scale."@make_norm_from_scale(scale.SymmetricalLogScale,init=lambdalinthresh,linscale=1.,vmin=None,vmax=None,clip=False,*,base=10:None)classSymLogNorm(Normalize):""" The symmetrical logarithmic scale is logarithmic in both the positive and negative directions from the origin. Since the values close to zero tend toward infinity, there is a need to have a range around zero that is linear. The parameter *linthresh* allows the user to specify the size of this range (-*linthresh*, *linthresh*). Parameters ---------- linthresh : float The range within which the plot is linear (to avoid having the plot go to infinity around zero). linscale : float, default: 1 This allows the linear range (-*linthresh* to *linthresh*) to be stretched relative to the logarithmic range. Its value is the number of decades to use for each half of the linear range. For example, when *linscale* == 1.0 (the default), the space used for the positive and negative halves of the linear range will be equal to one decade in the logarithmic range. base : float, default: 10 """@propertydeflinthresh(self):returnself._scale.linthresh@linthresh.setterdeflinthresh(self,value):self._scale.linthresh=value@make_norm_from_scale(scale.AsinhScale,init=lambdalinear_width=1,vmin=None,vmax=None,clip=False:None)classAsinhNorm(Normalize):""" The inverse hyperbolic sine scale is approximately linear near the origin, but becomes logarithmic for larger positive or negative values. Unlike the `SymLogNorm`, the transition between these linear and logarithmic regions is smooth, which may reduce the risk of visual artifacts. .. note:: This API is provisional and may be revised in the future based on early user feedback. Parameters ---------- linear_width : float, default: 1 The effective width of the linear region, beyond which the transformation becomes asymptotically logarithmic """@propertydeflinear_width(self):returnself._scale.linear_width@linear_width.setterdeflinear_width(self,value):self._scale.linear_width=valueclassPowerNorm(Normalize):""" Linearly map a given value to the 0-1 range and then apply a power-law normalization over that range. 
"""def__init__(self,gamma,vmin=None,vmax=None,clip=False):super().__init__(vmin,vmax,clip)self.gamma=gammadef__call__(self,value,clip=None):ifclipisNone:clip=self.clipresult,is_scalar=self.process_value(value)self.autoscale_None(result)gamma=self.gammavmin,vmax=self.vmin,self.vmaxifvmin>vmax:raiseValueError("minvalue must be less than or equal to maxvalue")elifvmin==vmax:result.fill(0)else:ifclip:mask=np.ma.getmask(result)result=np.ma.array(np.clip(result.filled(vmax),vmin,vmax),mask=mask)resdat=result.dataresdat-=vminresdat[resdat<0]=0np.power(resdat,gamma,resdat)resdat/=(vmax-vmin)**gammaresult=np.ma.array(resdat,mask=result.mask,copy=False)ifis_scalar:result=result[0]returnresultdefinverse(self,value):ifnotself.scaled():raiseValueError("Not invertible until scaled")gamma=self.gammavmin,vmax=self.vmin,self.vmaxifnp.iterable(value):val=np.ma.asarray(value)returnnp.ma.power(val,1./gamma)*(vmax-vmin)+vminelse:returnpow(value,1./gamma)*(vmax-vmin)+vminclassBoundaryNorm(Normalize):""" Generate a colormap index based on discrete intervals. Unlike `Normalize` or `LogNorm`, `BoundaryNorm` maps values to integers instead of to the interval 0-1. """# Mapping to the 0-1 interval could have been done via piece-wise linear# interpolation, but using integers seems simpler, and reduces the number# of conversions back and forth between int and float.def__init__(self,boundaries,ncolors,clip=False,*,extend='neither'):""" Parameters ---------- boundaries : array-like Monotonically increasing sequence of at least 2 bin edges: data falling in the n-th bin will be mapped to the n-th color. ncolors : int Number of colors in the colormap to be used. clip : bool, optional If clip is ``True``, out of range values are mapped to 0 if they are below ``boundaries[0]`` or mapped to ``ncolors - 1`` if they are above ``boundaries[-1]``. If clip is ``False``, out of range values are mapped to -1 if they are below ``boundaries[0]`` or mapped to *ncolors* if they are above ``boundaries[-1]``. These are then converted to valid indices by `Colormap.__call__`. extend : {'neither', 'both', 'min', 'max'}, default: 'neither' Extend the number of bins to include one or both of the regions beyond the boundaries. For example, if ``extend`` is 'min', then the color to which the region between the first pair of boundaries is mapped will be distinct from the first color in the colormap, and by default a `~matplotlib.colorbar.Colorbar` will be drawn with the triangle extension on the left or lower end. Notes ----- If there are fewer bins (including extensions) than colors, then the color index is chosen by linearly interpolating the ``[0, nbins - 1]`` range onto the ``[0, ncolors - 1]`` range, effectively skipping some colors in the middle of the colormap. 
"""ifclipandextend!='neither':raiseValueError("'clip=True' is not compatible with 'extend'")super().__init__(vmin=boundaries[0],vmax=boundaries[-1],clip=clip)self.boundaries=np.asarray(boundaries)self.N=len(self.boundaries)ifself.N<2:raiseValueError("You must provide at least 2 boundaries "f"(1 region) but you passed in {boundaries!r}")self.Ncmap=ncolorsself.extend=extendself._scale=None# don't use the default scale.self._n_regions=self.N-1# number of colors neededself._offset=0ifextendin('min','both'):self._n_regions+=1self._offset=1ifextendin('max','both'):self._n_regions+=1ifself._n_regions>self.Ncmap:raiseValueError(f"There are {self._n_regions} color bins ""including extensions, but ncolors = "f"{ncolors}; ncolors must equal or exceed the ""number of bins")def__call__(self,value,clip=None):""" This method behaves similarly to `.Normalize.__call__`, except that it returns integers or arrays of int16. """ifclipisNone:clip=self.clipxx,is_scalar=self.process_value(value)mask=np.ma.getmaskarray(xx)# Fill masked values a value above the upper boundaryxx=np.atleast_1d(xx.filled(self.vmax+1))ifclip:np.clip(xx,self.vmin,self.vmax,out=xx)max_col=self.Ncmap-1else:max_col=self.Ncmap# this gives us the bins in the lookup table in the range# [0, _n_regions - 1] (the offset is set in the init)iret=np.digitize(xx,self.boundaries)-1+self._offset# if we have more colors than regions, stretch the region# index computed above to full range of the color bins. This# will make use of the full range (but skip some of the colors# in the middle) such that the first region is mapped to the# first color and the last region is mapped to the last color.ifself.Ncmap>self._n_regions:ifself._n_regions==1:# special case the 1 region case, pick the middle coloriret[iret==0]=(self.Ncmap-1)//2else:# otherwise linearly remap the values from the region index# to the color index spacesiret=(self.Ncmap-1)/(self._n_regions-1)*iret# cast to 16bit integers in all casesiret=iret.astype(np.int16)iret[xx<self.vmin]=-1iret[xx>=self.vmax]=max_colret=np.ma.array(iret,mask=mask)ifis_scalar:ret=int(ret[0])# assume python scalarreturnretdefinverse(self,value):""" Raises ------ ValueError BoundaryNorm is not invertible, so calling this method will always raise an error """raiseValueError("BoundaryNorm is not invertible")classNoNorm(Normalize):""" Dummy replacement for `Normalize`, for the case where we want to use indices directly in a `~matplotlib.cm.ScalarMappable`. """def__call__(self,value,clip=None):returnvaluedefinverse(self,value):returnvaluedefrgb_to_hsv(arr):""" Convert float RGB values (in the range [0, 1]), in a numpy array to HSV values. 
Parameters ---------- arr : (..., 3) array-like All values must be in the range [0, 1] Returns ------- (..., 3) `~numpy.ndarray` Colors converted to HSV values in range [0, 1] """arr=np.asarray(arr)# check length of the last dimension, should be _some_ sort of rgbifarr.shape[-1]!=3:raiseValueError("Last dimension of input array must be 3; ""shape {} was found.".format(arr.shape))in_shape=arr.shapearr=np.array(arr,copy=False,dtype=np.promote_types(arr.dtype,np.float32),# Don't work on ints.ndmin=2,# In case input was 1D.)out=np.zeros_like(arr)arr_max=arr.max(-1)ipos=arr_max>0delta=arr.ptp(-1)s=np.zeros_like(delta)s[ipos]=delta[ipos]/arr_max[ipos]ipos=delta>0# red is maxidx=(arr[...,0]==arr_max)&iposout[idx,0]=(arr[idx,1]-arr[idx,2])/delta[idx]# green is maxidx=(arr[...,1]==arr_max)&iposout[idx,0]=2.+(arr[idx,2]-arr[idx,0])/delta[idx]# blue is maxidx=(arr[...,2]==arr_max)&iposout[idx,0]=4.+(arr[idx,0]-arr[idx,1])/delta[idx]out[...,0]=(out[...,0]/6.0)%1.0out[...,1]=sout[...,2]=arr_maxreturnout.reshape(in_shape)defhsv_to_rgb(hsv):""" Convert HSV values to RGB. Parameters ---------- hsv : (..., 3) array-like All values assumed to be in range [0, 1] Returns ------- (..., 3) `~numpy.ndarray` Colors converted to RGB values in range [0, 1] """hsv=np.asarray(hsv)# check length of the last dimension, should be _some_ sort of rgbifhsv.shape[-1]!=3:raiseValueError("Last dimension of input array must be 3; ""shape {shp} was found.".format(shp=hsv.shape))in_shape=hsv.shapehsv=np.array(hsv,copy=False,dtype=np.promote_types(hsv.dtype,np.float32),# Don't work on ints.ndmin=2,# In case input was 1D.)h=hsv[...,0]s=hsv[...,1]v=hsv[...,2]r=np.empty_like(h)g=np.empty_like(h)b=np.empty_like(h)i=(h*6.0).astype(int)f=(h*6.0)-ip=v*(1.0-s)q=v*(1.0-s*f)t=v*(1.0-s*(1.0-f))idx=i%6==0r[idx]=v[idx]g[idx]=t[idx]b[idx]=p[idx]idx=i==1r[idx]=q[idx]g[idx]=v[idx]b[idx]=p[idx]idx=i==2r[idx]=p[idx]g[idx]=v[idx]b[idx]=t[idx]idx=i==3r[idx]=p[idx]g[idx]=q[idx]b[idx]=v[idx]idx=i==4r[idx]=t[idx]g[idx]=p[idx]b[idx]=v[idx]idx=i==5r[idx]=v[idx]g[idx]=p[idx]b[idx]=q[idx]idx=s==0r[idx]=v[idx]g[idx]=v[idx]b[idx]=v[idx]rgb=np.stack([r,g,b],axis=-1)returnrgb.reshape(in_shape)def_vector_magnitude(arr):# things that don't work here:# * np.linalg.norm: drops mask from ma.array# * np.sum: drops mask from ma.array unless entire vector is maskedsum_sq=0foriinrange(arr.shape[-1]):sum_sq+=arr[...,i,np.newaxis]**2returnnp.sqrt(sum_sq)classLightSource:""" Create a light source coming from the specified azimuth and elevation. Angles are in degrees, with the azimuth measured clockwise from north and elevation up from the zero plane of the surface. `shade` is used to produce "shaded" RGB values for a data array. `shade_rgb` can be used to combine an RGB image with an elevation map. `hillshade` produces an illumination map of a surface. """def__init__(self,azdeg=315,altdeg=45,hsv_min_val=0,hsv_max_val=1,hsv_min_sat=1,hsv_max_sat=0):""" Specify the azimuth (measured clockwise from south) and altitude (measured up from the plane of the surface) of the light source in degrees. Parameters ---------- azdeg : float, default: 315 degrees (from the northwest) The azimuth (0-360, degrees clockwise from North) of the light source. altdeg : float, default: 45 degrees The altitude (0-90, degrees up from horizontal) of the light source. Notes ----- For backwards compatibility, the parameters *hsv_min_val*, *hsv_max_val*, *hsv_min_sat*, and *hsv_max_sat* may be supplied at initialization as well. 
class LightSource:
    """
    Create a light source coming from the specified azimuth and elevation.
    Angles are in degrees, with the azimuth measured clockwise from north and
    elevation up from the zero plane of the surface.

    `shade` is used to produce "shaded" RGB values for a data array.
    `shade_rgb` can be used to combine an RGB image with an elevation map.
    `hillshade` produces an illumination map of a surface.
    """

    def __init__(self, azdeg=315, altdeg=45, hsv_min_val=0, hsv_max_val=1,
                 hsv_min_sat=1, hsv_max_sat=0):
        """
        Specify the azimuth (measured clockwise from north) and altitude
        (measured up from the plane of the surface) of the light source in
        degrees.

        Parameters
        ----------
        azdeg : float, default: 315 degrees (from the northwest)
            The azimuth (0-360, degrees clockwise from North) of the light
            source.
        altdeg : float, default: 45 degrees
            The altitude (0-90, degrees up from horizontal) of the light
            source.

        Notes
        -----
        For backwards compatibility, the parameters *hsv_min_val*,
        *hsv_max_val*, *hsv_min_sat*, and *hsv_max_sat* may be supplied at
        initialization as well.  However, these parameters will only be used
        if "blend_mode='hsv'" is passed into `shade` or `shade_rgb`.  See the
        documentation for `blend_hsv` for more details.
        """
        self.azdeg = azdeg
        self.altdeg = altdeg
        self.hsv_min_val = hsv_min_val
        self.hsv_max_val = hsv_max_val
        self.hsv_min_sat = hsv_min_sat
        self.hsv_max_sat = hsv_max_sat

    @property
    def direction(self):
        """The unit vector direction towards the light source."""
        # Azimuth is in degrees clockwise from North.  Convert to radians
        # counterclockwise from East (mathematical notation).
        az = np.radians(90 - self.azdeg)
        alt = np.radians(self.altdeg)
        return np.array([
            np.cos(az) * np.cos(alt),
            np.sin(az) * np.cos(alt),
            np.sin(alt)
        ])

    def hillshade(self, elevation, vert_exag=1, dx=1, dy=1, fraction=1.):
        """
        Calculate the illumination intensity for a surface using the defined
        azimuth and elevation for the light source.

        This computes the normal vectors for the surface, and then passes them
        on to `shade_normals`.

        Parameters
        ----------
        elevation : 2D array-like
            The height values used to generate an illumination map.
        vert_exag : number, optional
            The amount to exaggerate the elevation values by when calculating
            illumination. This can be used either to correct for differences
            in units between the x-y coordinate system and the elevation
            coordinate system (e.g. decimal degrees vs. meters) or to
            exaggerate or de-emphasize topographic effects.
        dx : number, optional
            The x-spacing (columns) of the input *elevation* grid.
        dy : number, optional
            The y-spacing (rows) of the input *elevation* grid.
        fraction : number, optional
            Increases or decreases the contrast of the hillshade.  Values
            greater than one will cause intermediate values to move closer to
            full illumination or shadow (and clipping any values that move
            beyond 0 or 1). Note that this is not visually or mathematically
            the same as vertical exaggeration.

        Returns
        -------
        `~numpy.ndarray`
            A 2D array of illumination values between 0-1, where 0 is
            completely in shadow and 1 is completely illuminated.
        """
        # Because most image and raster GIS data has the first row in the
        # array as the "top" of the image, dy is implicitly negative.  This is
        # consistent with what `imshow` assumes, as well.
        dy = -dy

        # compute the normal vectors from the partial derivatives
        e_dy, e_dx = np.gradient(vert_exag * elevation, dy, dx)

        # .view is to keep subclasses
        normal = np.empty(elevation.shape + (3,)).view(type(elevation))
        normal[..., 0] = -e_dx
        normal[..., 1] = -e_dy
        normal[..., 2] = 1
        normal /= _vector_magnitude(normal)

        return self.shade_normals(normal, fraction)
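
    # -----------------------------------------------------------------------
    # Illustrative sketch (not part of Matplotlib's API): demonstrates the
    # `direction` property and `hillshade` on a small synthetic surface.  The
    # method name `_demo_hillshade` is hypothetical and exists only as an
    # embedded example.
    def _demo_hillshade(self):
        # A 2D "hill": elevation peaks in the middle of a 50x50 grid.
        y, x = np.mgrid[0:50, 0:50]
        elevation = np.exp(-((x - 25) ** 2 + (y - 25) ** 2) / 100)
        # `direction` is a unit vector pointing towards the light source.
        assert np.isclose(np.linalg.norm(self.direction), 1)
        # The result is a 2D array of illumination values in [0, 1].
        illum = self.hillshade(elevation, vert_exag=10)
        return illum.min() >= 0 and illum.max() <= 1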
"""intensity=normals.dot(self.direction)# Apply contrast stretchimin,imax=intensity.min(),intensity.max()intensity*=fraction# Rescale to 0-1, keeping range before contrast stretch# If constant slope, keep relative scaling (i.e. flat should be 0.5,# fully occluded 0, etc.)if(imax-imin)>1e-6:# Strictly speaking, this is incorrect. Negative values should be# clipped to 0 because they're fully occluded. However, rescaling# in this manner is consistent with the previous implementation and# visually appears better than a "hard" clip.intensity-=iminintensity/=(imax-imin)intensity=np.clip(intensity,0,1)returnintensitydefshade(self,data,cmap,norm=None,blend_mode='overlay',vmin=None,vmax=None,vert_exag=1,dx=1,dy=1,fraction=1,**kwargs):""" Combine colormapped data values with an illumination intensity map (a.k.a. "hillshade") of the values. Parameters ---------- data : 2D array-like The height values used to generate a shaded map. cmap : `~matplotlib.colors.Colormap` The colormap used to color the *data* array. Note that this must be a `~matplotlib.colors.Colormap` instance. For example, rather than passing in ``cmap='gist_earth'``, use ``cmap=plt.get_cmap('gist_earth')`` instead. norm : `~matplotlib.colors.Normalize` instance, optional The normalization used to scale values before colormapping. If None, the input will be linearly scaled between its min and max. blend_mode : {'hsv', 'overlay', 'soft'} or callable, optional The type of blending used to combine the colormapped data values with the illumination intensity. Default is "overlay". Note that for most topographic surfaces, "overlay" or "soft" appear more visually realistic. If a user-defined function is supplied, it is expected to combine an MxNx3 RGB array of floats (ranging 0 to 1) with an MxNx1 hillshade array (also 0 to 1). (Call signature ``func(rgb, illum, **kwargs)``) Additional kwargs supplied to this function will be passed on to the *blend_mode* function. vmin : float or None, optional The minimum value used in colormapping *data*. If *None* the minimum value in *data* is used. If *norm* is specified, then this argument will be ignored. vmax : float or None, optional The maximum value used in colormapping *data*. If *None* the maximum value in *data* is used. If *norm* is specified, then this argument will be ignored. vert_exag : number, optional The amount to exaggerate the elevation values by when calculating illumination. This can be used either to correct for differences in units between the x-y coordinate system and the elevation coordinate system (e.g. decimal degrees vs. meters) or to exaggerate or de-emphasize topography. dx : number, optional The x-spacing (columns) of the input *elevation* grid. dy : number, optional The y-spacing (rows) of the input *elevation* grid. fraction : number, optional Increases or decreases the contrast of the hillshade. Values greater than one will cause intermediate values to move closer to full illumination or shadow (and clipping any values that move beyond 0 or 1). Note that this is not visually or mathematically the same as vertical exaggeration. **kwargs Additional kwargs are passed on to the *blend_mode* function. Returns ------- `~numpy.ndarray` An MxNx4 array of floats ranging between 0-1. 
"""ifvminisNone:vmin=data.min()ifvmaxisNone:vmax=data.max()ifnormisNone:norm=Normalize(vmin=vmin,vmax=vmax)rgb0=cmap(norm(data))rgb1=self.shade_rgb(rgb0,elevation=data,blend_mode=blend_mode,vert_exag=vert_exag,dx=dx,dy=dy,fraction=fraction,**kwargs)# Don't overwrite the alpha channel, if present.rgb0[...,:3]=rgb1[...,:3]returnrgb0defshade_rgb(self,rgb,elevation,fraction=1.,blend_mode='hsv',vert_exag=1,dx=1,dy=1,**kwargs):""" Use this light source to adjust the colors of the *rgb* input array to give the impression of a shaded relief map with the given *elevation*. Parameters ---------- rgb : array-like An (M, N, 3) RGB array, assumed to be in the range of 0 to 1. elevation : array-like An (M, N) array of the height values used to generate a shaded map. fraction : number Increases or decreases the contrast of the hillshade. Values greater than one will cause intermediate values to move closer to full illumination or shadow (and clipping any values that move beyond 0 or 1). Note that this is not visually or mathematically the same as vertical exaggeration. blend_mode : {'hsv', 'overlay', 'soft'} or callable, optional The type of blending used to combine the colormapped data values with the illumination intensity. For backwards compatibility, this defaults to "hsv". Note that for most topographic surfaces, "overlay" or "soft" appear more visually realistic. If a user-defined function is supplied, it is expected to combine an MxNx3 RGB array of floats (ranging 0 to 1) with an MxNx1 hillshade array (also 0 to 1). (Call signature ``func(rgb, illum, **kwargs)``) Additional kwargs supplied to this function will be passed on to the *blend_mode* function. vert_exag : number, optional The amount to exaggerate the elevation values by when calculating illumination. This can be used either to correct for differences in units between the x-y coordinate system and the elevation coordinate system (e.g. decimal degrees vs. meters) or to exaggerate or de-emphasize topography. dx : number, optional The x-spacing (columns) of the input *elevation* grid. dy : number, optional The y-spacing (rows) of the input *elevation* grid. **kwargs Additional kwargs are passed on to the *blend_mode* function. Returns ------- `~numpy.ndarray` An (m, n, 3) array of floats ranging between 0-1. """# Calculate the "hillshade" intensity.intensity=self.hillshade(elevation,vert_exag,dx,dy,fraction)intensity=intensity[...,np.newaxis]# Blend the hillshade and rgb data using the specified modelookup={'hsv':self.blend_hsv,'soft':self.blend_soft_light,'overlay':self.blend_overlay,}ifblend_modeinlookup:blend=lookup[blend_mode](rgb,intensity,**kwargs)else:try:blend=blend_mode(rgb,intensity,**kwargs)exceptTypeErroraserr:raiseValueError('"blend_mode" must be callable or one of {}'.format(lookup.keys))fromerr# Only apply result where hillshade intensity isn't maskedifnp.ma.is_masked(intensity):mask=intensity.mask[...,0]foriinrange(3):blend[...,i][mask]=rgb[...,i][mask]returnblenddefblend_hsv(self,rgb,intensity,hsv_max_sat=None,hsv_max_val=None,hsv_min_val=None,hsv_min_sat=None):""" Take the input data array, convert to HSV values in the given colormap, then adjust those color values to give the impression of a shaded relief map with a specified light source. RGBA values are returned, which can then be used to plot the shaded image with imshow. 
    def blend_hsv(self, rgb, intensity, hsv_max_sat=None, hsv_max_val=None,
                  hsv_min_val=None, hsv_min_sat=None):
        """
        Take the input data array, convert to HSV values in the given
        colormap, then adjust those color values to give the impression of a
        shaded relief map with a specified light source.  RGBA values are
        returned, which can then be used to plot the shaded image with
        imshow.

        The color of the resulting image will be darkened by moving the
        (s, v) values (in HSV colorspace) toward (hsv_min_sat, hsv_min_val)
        in the shaded regions, or lightened by sliding (s, v) toward
        (hsv_max_sat, hsv_max_val) in regions that are illuminated.  The
        default extremes are chosen so that completely shaded points are
        nearly black (s = 1, v = 0) and completely illuminated points are
        nearly white (s = 0, v = 1).

        Parameters
        ----------
        rgb : `~numpy.ndarray`
            An MxNx3 RGB array of floats ranging from 0 to 1 (color image).
        intensity : `~numpy.ndarray`
            An MxNx1 array of floats ranging from 0 to 1 (grayscale image).
        hsv_max_sat : number, optional
            The maximum saturation value that the *intensity* map can shift
            the output image to.  If None, the value given to the
            `LightSource` constructor is used (0 by default).
        hsv_min_sat : number, optional
            The minimum saturation value that the *intensity* map can shift
            the output image to.  If None, the value given to the
            `LightSource` constructor is used (1 by default).
        hsv_max_val : number, optional
            The maximum value ("v" in "hsv") that the *intensity* map can
            shift the output image to.  If None, the value given to the
            `LightSource` constructor is used (1 by default).
        hsv_min_val : number, optional
            The minimum value ("v" in "hsv") that the *intensity* map can
            shift the output image to.  If None, the value given to the
            `LightSource` constructor is used (0 by default).

        Returns
        -------
        `~numpy.ndarray`
            An MxNx3 RGB array representing the combined images.
        """
        # Backward compatibility...
        if hsv_max_sat is None:
            hsv_max_sat = self.hsv_max_sat
        if hsv_max_val is None:
            hsv_max_val = self.hsv_max_val
        if hsv_min_sat is None:
            hsv_min_sat = self.hsv_min_sat
        if hsv_min_val is None:
            hsv_min_val = self.hsv_min_val

        # Expects a 2D intensity array scaled between -1 to 1...
        intensity = intensity[..., 0]
        intensity = 2 * intensity - 1

        # Convert to rgb, then rgb to hsv
        hsv = rgb_to_hsv(rgb[:, :, 0:3])
        hue, sat, val = np.moveaxis(hsv, -1, 0)

        # Modify hsv values (in place) to simulate illumination.
        # putmask(A, mask, B) <=> A[mask] = B[mask]
        np.putmask(sat, (np.abs(sat) > 1.e-10) & (intensity > 0),
                   (1 - intensity) * sat + intensity * hsv_max_sat)
        np.putmask(sat, (np.abs(sat) > 1.e-10) & (intensity < 0),
                   (1 + intensity) * sat - intensity * hsv_min_sat)
        np.putmask(val, intensity > 0,
                   (1 - intensity) * val + intensity * hsv_max_val)
        np.putmask(val, intensity < 0,
                   (1 + intensity) * val - intensity * hsv_min_val)
        np.clip(hsv[:, :, 1:], 0, 1, out=hsv[:, :, 1:])

        # Convert modified hsv back to rgb.
        return hsv_to_rgb(hsv)

    def blend_soft_light(self, rgb, intensity):
        """
        Combine an RGB image with an intensity map using "soft light"
        blending, using the "pegtop" formula.

        Parameters
        ----------
        rgb : `~numpy.ndarray`
            An MxNx3 RGB array of floats ranging from 0 to 1 (color image).
        intensity : `~numpy.ndarray`
            An MxNx1 array of floats ranging from 0 to 1 (grayscale image).

        Returns
        -------
        `~numpy.ndarray`
            An MxNx3 RGB array representing the combined images.
        """
        return 2 * intensity * rgb + (1 - 2 * intensity) * rgb**2

    def blend_overlay(self, rgb, intensity):
        """
        Combine an RGB image with an intensity map using "overlay" blending.

        Parameters
        ----------
        rgb : `~numpy.ndarray`
            An MxNx3 RGB array of floats ranging from 0 to 1 (color image).
        intensity : `~numpy.ndarray`
            An MxNx1 array of floats ranging from 0 to 1 (grayscale image).

        Returns
        -------
        `~numpy.ndarray`
            An MxNx3 RGB array representing the combined images.
        """
        low = 2 * intensity * rgb
        high = 1 - 2 * (1 - intensity) * (1 - rgb)
        return np.where(rgb <= 0.5, low, high)
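

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the library API): the built-in blend modes
# operate elementwise on an (M, N, 3) RGB array and an (M, N, 1) intensity
# array.  At intensity 0.5 both "soft light" (the pegtop formula) and
# "overlay" leave a mid-gray image unchanged, while lower intensities darken
# and higher intensities lighten it.  `_demo_blend_modes` is a hypothetical
# name used only as an embedded example.
def _demo_blend_modes():
    ls = LightSource(azdeg=315, altdeg=45)
    rgb = np.full((1, 1, 3), 0.5)
    half = np.full((1, 1, 1), 0.5)
    soft = ls.blend_soft_light(rgb, half)   # 2*i*rgb + (1 - 2*i)*rgb**2
    over = ls.blend_overlay(rgb, half)      # 2*i*rgb where rgb <= 0.5,
                                            # else 1 - 2*(1-i)*(1-rgb)
    return np.allclose(soft, 0.5) and np.allclose(over, 0.5)

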
If *extend* is "neither" there must be ``n_level - 1`` colors. For an *extend* of "min" or "max" add one extra color, and for an *extend* of "both" add two colors. extend : {'neither', 'min', 'max', 'both'}, optional The behaviour when a value falls out of range of the given levels. See `~.Axes.contourf` for details. Returns ------- cmap : `~matplotlib.colors.Normalize` norm : `~matplotlib.colors.Colormap` """slice_map={'both':slice(1,-1),'min':slice(1,None),'max':slice(0,-1),'neither':slice(0,None),}_api.check_in_list(slice_map,extend=extend)color_slice=slice_map[extend]n_data_colors=len(levels)-1n_expected=n_data_colors+color_slice.start-(color_slice.stopor0)iflen(colors)!=n_expected:raiseValueError(f'With extend == {extend!r} and {len(levels)} levels, 'f'expected {n_expected} colors, but got {len(colors)}')cmap=ListedColormap(colors[color_slice],N=n_data_colors)ifextendin['min','both']:cmap.set_under(colors[0])else:cmap.set_under('none')ifextendin['max','both']:cmap.set_over(colors[-1])else:cmap.set_over('none')cmap.colorbar_extend=extendnorm=BoundaryNorm(levels,ncolors=n_data_colors)returncmap,norm