diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index 1e73e65b4f..7de5dd6358 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -59,3 +59,4 @@ bd535c710db78420b8e8b9d71d88d8339e899c59 cf433215b58ba8776ec5edfb0b0d80c0836ed3a0 16d57ff37859b34dab005693e3085d64e2bcd95a e8fc526e0d7818d45f171488c78392c4ff63902a +cdf40d265cc82775607a1bf25f5f527bacc97405 diff --git a/doc/source/users_guide/overview/introduction.rst b/doc/source/users_guide/overview/introduction.rst index 8056e6ab16..f8fe858c00 100644 --- a/doc/source/users_guide/overview/introduction.rst +++ b/doc/source/users_guide/overview/introduction.rst @@ -60,7 +60,9 @@ As a followup to the tools chapter, :ref:`adding-new-resolutions-section` tells In :ref:`running-special-cases-section`, again for the expert user, we give details on how to do some particularly difficult special cases. For example, we give the protocol for spinning up the |version|-BGC and CLMCN models as well as CLM with dynamic vegetation active (CNDV). We give instructions to do a spinup case from a previous case with Coupler history output for atmospheric forcing. We also give instructions on running both the prognostic crop and irrigation models. Lastly we tell the user how to use the DATM model to send historical CO2 data to CLM. -:ref:`running-single-points` outlines how to do single-point or regional simulations using |version|. This is useful to either compare |version| simulations with point observational stations, such as tower sites (which might include your own atmospheric forcing), or to do quick simulations with CLM for example to test a new parameterization. There are several different ways given on how to perform single-point simulations which range from simple PTS_MODE to more complex where you create all your own datasets, tying into :ref:`using-clm-tools-section` and also :ref:`adding-new-resolutions-section` to add the files into the build-namelist XML database. 
+:ref:`running-single-points` outlines how to do single-point or regional simulations using |version|. This is useful to either compare |version| simulations with point observational stations, such as tower sites (which might include your own atmospheric forcing), or to do quick simulations with CLM for example to test a new parameterization. There are several different ways given on how to perform single-point simulations which range from simple sampling of existing inputs to more complex methods where you create all your own datasets, tying into :ref:`using-clm-tools-section` and also :ref:`adding-new-resolutions-section` to add the files into the build-namelist XML database. + +There is also :ref:`pts_mode`, which is useful for running single points as part of the Single Column Atmospheric Model (SCAM). :ref:`troubleshooting-index` gives some guidance on trouble-shooting problems when using |version|. It doesn't cover all possible problems with CLM, but gives you some guidelines for things that can be done for some common problems. 
diff --git a/doc/source/users_guide/running-single-points/index.rst b/doc/source/users_guide/running-single-points/index.rst index ba342d0ba9..f65e9c5bf3 100644 --- a/doc/source/users_guide/running-single-points/index.rst +++ b/doc/source/users_guide/running-single-points/index.rst @@ -15,6 +15,7 @@ Running Single Point Regional Cases :maxdepth: 2 single-point-and-regional-grid-configurations.rst - running-pts_mode-configurations.rst + running-single-point-subset-data.rst running-single-point-configurations.rst + running-pts_mode-configurations.rst diff --git a/doc/source/users_guide/running-single-points/running-pts_mode-configurations.rst b/doc/source/users_guide/running-single-points/running-pts_mode-configurations.rst index 53cae1bdf4..fb61397321 100644 --- a/doc/source/users_guide/running-single-points/running-pts_mode-configurations.rst +++ b/doc/source/users_guide/running-single-points/running-pts_mode-configurations.rst @@ -6,6 +6,9 @@ Running a single point using global data - PTS_MODE **************************************************** +.. warning:: + ``PTS_MODE`` has been mostly deprecated in favor of ``subset_data`` (Sect. :numref:`single_point_subset_data`). You should only consider using it if you are using the Single Column Atmospheric Model (SCAM). + ``PTS_MODE`` enables you to run the model using global datasets, but just picking a single point from those datasets and operating on it. It can be a very quick way to do fast simulations and get a quick turnaround. To setup a ``PTS_MODE`` simulation you use the ``-pts_lat`` and ``-pts_lon`` arguments to ``cime/scripts/create_newcase`` to give the latitude and longitude of the point you want to simulate for (the code will pick the point on the global grid nearest to the point you give. Here's an example to setup a simulation for the nearest point at 2-degree resolution to Boulder Colorado. 
diff --git a/doc/source/users_guide/running-single-points/running-single-point-configurations.rst b/doc/source/users_guide/running-single-points/running-single-point-configurations.rst index 0e7f1262e2..56cad6a11e 100644 --- a/doc/source/users_guide/running-single-points/running-single-point-configurations.rst +++ b/doc/source/users_guide/running-single-points/running-single-point-configurations.rst @@ -6,7 +6,7 @@ Running Single Point Configurations ****************************************** -In addition to ``PTS_MODE`` (Sect. :numref:`pts_mode`), CLM supports running using single-point or regional datasets that are customized to a particular region. CLM supports a a small number of out-of-the-box single-point and regional datasets. However, users can create their own dataset. +In addition to running with the outputs of ``subset_data`` (Sect. :numref:`single_point_subset_data`), CLM supports running using single-point or regional datasets that are customized to a particular region. CLM supports a small number of out-of-the-box single-point and regional datasets. However, users can create their own dataset. To get the list of supported dataset resolutions do this: :: @@ -32,7 +32,7 @@ The resolution names that have an underscore in them ("_") are all single-point .. note:: When running a single point, the number of processors is automatically set to one, which is the only value allowed. .. warning:: - Just like ``PTS_MODE`` (Sect. :numref:`pts_mode`), by default these setups sometimes run with ``MPILIB=mpi-serial`` (in the ``env_build.xml`` file) turned on, which allows you to run the model interactively. On some machines this mode is NOT supported and you may need to change it to FALSE before you are able to build. + Just like running with the outputs from ``subset_data`` (Sect. 
:numref:`single_point_subset_data`), by default these setups sometimes run with ``MPILIB=mpi-serial`` (in the ``env_build.xml`` file) turned on, which allows you to run the model interactively. On some machines this mode is NOT supported and you may need to change it to FALSE before you are able to build. .. _single-point-global-climate: diff --git a/doc/source/users_guide/running-single-points/running-single-point-subset-data.rst b/doc/source/users_guide/running-single-points/running-single-point-subset-data.rst new file mode 100644 index 0000000000..f829f2c624 --- /dev/null +++ b/doc/source/users_guide/running-single-points/running-single-point-subset-data.rst @@ -0,0 +1,60 @@ +.. include:: ../substitutions.rst + +.. _single_point_subset_data: + +**************************************** +Running a single point using global data +**************************************** + +``subset_data`` enables you to run the model using global datasets, but just picking a single point from those datasets and operating on it. It can be a very quick way to do fast simulations and get a quick turnaround. + +Subset the data +------------------ + +For single-point cases, you need to subset a surface dataset and (optionally) DATM data. The Python script to subset this data can be found in the CTSM repository at ``tools/site_and_regional/subset_data``. + +Note that you will need to have a python environment set up that includes the packages ``scipy``, ``xarray``, and ``numpy``. If you have conda or miniconda installed, you can create a conda environment for this and other CTSM python tools using the script ``py_env_create`` at the top level of your CTSM checkout. + +To subset surface data and climate forcings (DATM) for a single point, use the command: + +.. 
code:: shell + + tools/site_and_regional/subset_data point \ + --lat $my_lat --lon $my_lon --site $my_site_name \ + --create-surface --create-datm \ + --datm-syr $my_start_year --datm-eyr $my_end_year \ + --create-user-mods --outdir $my_output_dir + +- ``$my_lat``: latitude of point, *must be between -90 and 90 degrees*. E.g., Boulder, CO, USA: 40. +- ``$my_lon``: longitude of point, *must be between 0 and 360 degrees*. E.g., Boulder, CO, USA: 255. +- ``$my_site_name``: name of site, *used for file naming* +- ``$my_start_year``: start year for DATM data to subset, *default between 1901 and 2014* +- ``$my_end_year``: end year for DATM data to subset, *default between 1901 and 2014; the default CRUJRA2024 DATM data ends in 2023, while the old default GSWP3 ends in 2015; see note below about switching the default DATM data* +- ``$my_output_dir``: output directory to place the subset data and user_mods directory. This should be something specific to *just* your data for ``$my_site_name``. + +You can also have the script subset land-use data. See the help (``tools/site_and_regional/subset_data --help``) for all argument options. + +**Note that this script defaults to subsetting specific surface, domain, and land-use files and the CRUJRA2024 DATM data, and can currently only be run as-is on Derecho. If you're not on Derecho, use the ``--inputdata-dir`` option to specify where the top level of your CESM input data is. Also, to subset GSWP3 instead of CRUJRA2024 DATM data, you currently need to hardwire datm_type = "datm_gswp3" (instead of the default "datm_crujra") in python/ctsm/subset_data.py.** + +The ``--create-user-mods`` command tells the script to set up a user mods directory in your specified ``$my_output_dir`` and to specify the required ``PTS_LAT`` and ``PTS_LON`` settings. You can then use this user mods directory to set up your CTSM case, as described below. 
+ +Create the case +------------------ + +You can use the user mods directory set up in the previous subset data step to tell CIME/CTSM where your subset files are located. + +.. code:: shell + + cime/scripts/create_newcase --case $my_case_name --res CLM_USRDAT \ + --compset $compset --run-unsupported \ + --user-mods-dirs $my_output_dir/user_mods + +- ``$my_case_name``: the path of the case directory you want to create +- ``$compset``: the compset you would like to use (for example, ``I2000Clm60Bgc``) +- Note the use of ``$my_output_dir/user_mods`` which is the ``user_mods/`` directory that the subset data script set up within your specified ``$my_output_dir``. + +Note that ``./case.setup`` on Derecho will automatically set queue to ``develop`` and walltime to one hour. You might need a longer walltime, but the maximum walltime for ``develop`` is one hour. To change it to two hours on Derecho: + +.. code:: shell + + ./xmlchange --subgroup case.run JOB_QUEUE=main,JOB_WALLCLOCK_TIME=2:00:00 diff --git a/doc/source/users_guide/running-single-points/single-point-and-regional-grid-configurations.rst b/doc/source/users_guide/running-single-points/single-point-and-regional-grid-configurations.rst index 61e1f25de8..d16dfa6f5e 100644 --- a/doc/source/users_guide/running-single-points/single-point-and-regional-grid-configurations.rst +++ b/doc/source/users_guide/running-single-points/single-point-and-regional-grid-configurations.rst @@ -10,7 +10,7 @@ CLM allows you to set up and run cases with a single-point or a local region as There are two different ways to do this for normal-supported site -``PTS_MODE`` +``subset_data`` runs for a single point using global datasets. ``CLM_USRDAT_NAME`` @@ -24,9 +24,9 @@ There are two different ways to do this for normal-supported site Running for a *normal supported site* is a great solution, if one of the supported single-point/regional datasets, is your region of interest (see :ref:`running-single-point-datasets`). 
All the datasets are created for you, and you can easily select one and run, out of the box with it using a supported resolution from the top level of the CESM scripts. The problem is that there is a very limited set of supported datasets. You can also use this method for your own datasets, but you have to create the datasets, and add them to the XML database in scripts, CLM and to the DATM. This is worthwhile if you want to repeat many multiple cases for a given point or region. -In general :ref:`pts_mode` is the quick and dirty method that gets you started without having to create datasets -- but has limitations. It's good for an initial attempt at seeing results for a point of interest, but since you can NOT restart with it, it's usage is limited. It is the quickest method as you can create a case for it directly from ``cime/scripts/create_newcase``. Although you can't restart, running a single point is very fast, and you can run for long simulation times even without restarts. +In general :ref:`single_point_subset_data` is the quick and dirty method that gets you started, but it has limitations. It's good for an initial attempt at seeing results for a point of interest, but since you can NOT restart with it, its usage is limited. It is the quickest method as you can create a case for it directly from ``cime/scripts/create_newcase``. Although you can't restart, running a single point is very fast, and you can run for long simulation times even without restarts. -Next, ``CLM_USRDAT_NAME`` is the best way to setup cases quickly where you have to create your own datasets (see :ref:`running-single-point-datasets`). With this method you don't have to change DATM or add files to the XML database -- but you have to follow a strict naming convention for files. However, once the files are named and in the proper location, you can easily setup new cases that use these datasets. 
This is good for treating all the required datasets as a "group" and for a particular model version. For advanced CLM developers who need to track dataset changes with different model versions you would be best off adding these datasets as supported datasets with the "normal supported datasets" method. +Next, ``CLM_USRDAT_NAME`` using ``subset_data`` is the best way to setup cases quickly where you have a simple tool to create your own datasets (see :ref:`single_point_subset_data`). With this method you don't have to change DATM or add files to the XML database. ``subset_data`` will create a usermod directory where you can store your files and the files needed to directly run a case. Finally, if you also have meteorology data that you want to force your CLM simulations with you'll need to setup cases as described in :ref:`creating-your-own-singlepoint-dataset`. You'll need to create CLM datasets either according to ``CLM_USRDAT_NAME``. You may also need to modify DATM to use your forcing data. And you'll need to change your forcing data to be in a format that DATM can use. diff --git a/python/ctsm/config_utils.py b/python/ctsm/config_utils.py index bd53825f14..872dbe646f 100644 --- a/python/ctsm/config_utils.py +++ b/python/ctsm/config_utils.py @@ -25,17 +25,15 @@ def lon_range_0_to_360(lon_in): Restrict longitude to 0 to 360 when given as -180 to 180. """ if -180 <= lon_in < 0: - lon_out = lon_in % 360 - logger.info( - "Resetting longitude from %s to %s to keep in the range " " 0 to 360", - str(lon_in), - str(lon_out), + raise NotImplementedError( + "A negative longitude suggests you input longitudes in the range [-180, 0)---" + "i.e., centered around the Prime Meridian. This code requires longitudes in the " + "range [0, 360)---i.e., starting at the International Date Line." 
) - elif 0 <= lon_in <= 360 or lon_in is None: - lon_out = lon_in - else: + if not (0 <= lon_in <= 360 or lon_in is None): errmsg = "lon_in needs to be in the range 0 to 360" abort(errmsg) + lon_out = lon_in return lon_out diff --git a/python/ctsm/site_and_regional/single_point_case.py b/python/ctsm/site_and_regional/single_point_case.py index 2c2aebad52..ad64234391 100644 --- a/python/ctsm/site_and_regional/single_point_case.py +++ b/python/ctsm/site_and_regional/single_point_case.py @@ -608,7 +608,7 @@ def extract_datm_at(self, file_in, file_out): f_in.close() f_out.close() - def write_shell_commands(self, file): + def write_shell_commands(self, file, datm_syr, datm_eyr): """ writes out xml commands commands to a file (i.e. shell_commands) for single-point runs """ @@ -619,6 +619,10 @@ def write_shell_commands(self, file): self.write_to_file("./xmlchange PTS_LON={}".format(str(self.plon)), nl_file) self.write_to_file("./xmlchange PTS_LAT={}".format(str(self.plat)), nl_file) self.write_to_file("./xmlchange MPILIB=mpi-serial", nl_file) + if self.create_datm: + self.write_to_file(f"./xmlchange DATM_YR_ALIGN={datm_syr}", nl_file) + self.write_to_file(f"./xmlchange DATM_YR_START={datm_syr}", nl_file) + self.write_to_file(f"./xmlchange DATM_YR_END={datm_eyr}", nl_file) def write_datm_streams_lines(self, streamname, datmfiles, file): """ diff --git a/python/ctsm/subset_data.py b/python/ctsm/subset_data.py index fb0ba925a9..cba483c313 100644 --- a/python/ctsm/subset_data.py +++ b/python/ctsm/subset_data.py @@ -215,7 +215,11 @@ def get_parser(): ) rg_parser.add_argument( "--lon1", - help="Region westernmost longitude. [default: %(default)s]", + help=( + "Region westernmost longitude. Must be in [0, 360) format: i.e., starting at the" + " International Date Line rather than centered on the Prime Meridian. 
[default:" + " %(default)s]" + ), action="store", dest="lon1", required=False, @@ -224,7 +228,11 @@ def get_parser(): ) rg_parser.add_argument( "--lon2", - help="Region easternmost longitude. [default: %(default)s]", + help=( + "Region easternmost longitude. Must be in [0, 360) format: i.e., starting at the" + " International Date Line rather than centered on the Prime Meridian. [default:" + " %(default)s]" + ), action="store", dest="lon2", required=False, @@ -456,7 +464,8 @@ def check_args(args): """\ \n ------------------------------------ \n --surf-year option is NOT set to 1850 and the --create-landuse option - \n is selected which requires it to be 1850 + \n is selected which requires it to be 1850 (see + https://github.com/ESCOMP/CTSM/issues/2018) """ ) raise argparse.ArgumentError(None, err_msg) @@ -505,6 +514,17 @@ def check_args(args): ) raise argparse.ArgumentError(None, err_msg) + if args.run_type == "region" and args.create_datm: + err_msg = textwrap.dedent( + """\ + \n ------------------------------------ + \nERROR: For regional cases, you can not subset datm data + \n (see https://github.com/ESCOMP/CTSM/issues/2110) + \n but you can just use the global data instead + """ + ) + raise NotImplementedError(None, err_msg) + def setup_user_mods(user_mods_dir, cesmroot): """ @@ -686,7 +706,8 @@ def subset_point(args, file_dict: dict): # -- Write shell commands if single_point.create_user_mods: - single_point.write_shell_commands(os.path.join(args.user_mods_dir, "shell_commands")) + shell_commands_file = os.path.join(args.user_mods_dir, "shell_commands") + single_point.write_shell_commands(shell_commands_file, args.datm_syr, args.datm_eyr) logger.info("Successfully ran script for single point.") diff --git a/python/ctsm/test/test_unit_args_utils.py b/python/ctsm/test/test_unit_args_utils.py index 3a31b25224..2328e17d91 100755 --- a/python/ctsm/test/test_unit_args_utils.py +++ b/python/ctsm/test/test_unit_args_utils.py @@ -18,6 
+18,7 @@ # pylint: disable=wrong-import-position from ctsm.args_utils import plon_type, plat_type from ctsm import unit_testing +from ctsm.test.test_unit_utils import wrong_lon_type_error_regex # pylint: disable=invalid-name @@ -40,8 +41,13 @@ def test_plonType_negative(self): """ Test of negative plon between -180 and 0 """ - result = plon_type(-30) - self.assertEqual(result, 330.0) + # When CTSM Issue #3001 is resolved, this assertRaisesRegex block should be deleted and the + # rest of this test uncommented + with self.assertRaisesRegex(NotImplementedError, wrong_lon_type_error_regex): + plon_type(-30) + + # result = plon_type(-30) + # self.assertEqual(result, 330.0) # -- > 360 def test_plonType_outOfBounds_positive(self): @@ -64,8 +70,13 @@ def test_plonType_negative_180(self): """ Test for when plon values are -180 """ - result = plon_type(-180) - self.assertEqual(result, 180.0) + # When CTSM Issue #3001 is resolved, this assertRaisesRegex block should be deleted and the + # rest of this test uncommented + with self.assertRaisesRegex(NotImplementedError, wrong_lon_type_error_regex): + plon_type(-180) + + # result = plon_type(-180) + # self.assertEqual(result, 180.0) # -- = 0 def test_plonType_zero(self): diff --git a/python/ctsm/test/test_unit_config_utils.py b/python/ctsm/test/test_unit_config_utils.py index c9ee23bac3..e45443d952 100644 --- a/python/ctsm/test/test_unit_config_utils.py +++ b/python/ctsm/test/test_unit_config_utils.py @@ -9,6 +9,7 @@ from ctsm import unit_testing from ctsm.config_utils import lon_range_0_to_360, get_config_value_or_array +from ctsm.test.test_unit_utils import wrong_lon_type_error_regex # Allow test names that pylint doesn't like; otherwise hard to make them # readable @@ -32,14 +33,26 @@ def setUp(self): def test_negative_lon(self): """Test lon_range_0_to_360 for a negative longitude""" lon = -180.0 - lon_new = lon_range_0_to_360(lon) - self.assertEqual(lon_new, 180.0, "lon not as expected") + + # When CTSM Issue #3001 is 
resolved, this assertRaisesRegex block should be deleted and the + # rest of this test uncommented + with self.assertRaisesRegex(NotImplementedError, wrong_lon_type_error_regex): + lon_range_0_to_360(lon) + + # lon_new = lon_range_0_to_360(lon) + # self.assertEqual(lon_new, 180.0, "lon not as expected") def test_negative2_lon(self): """Test lon_range_0_to_360 for a negative longitude""" lon = -5.0 - lon_new = lon_range_0_to_360(lon) - self.assertEqual(lon_new, 355.0, "lon not as expected") + + # When CTSM Issue #3001 is resolved, this assertRaisesRegex block should be deleted and the + # rest of this test uncommented + with self.assertRaisesRegex(NotImplementedError, wrong_lon_type_error_regex): + lon_range_0_to_360(lon) + + # lon_new = lon_range_0_to_360(lon) + # self.assertEqual(lon_new, 355.0, "lon not as expected") def test_regular_lon(self): """Test lon_range_0_to_360 for a regular longitude""" diff --git a/python/ctsm/test/test_unit_modify_fsurdat.py b/python/ctsm/test/test_unit_modify_fsurdat.py index b796cd940d..3220ba2569 100755 --- a/python/ctsm/test/test_unit_modify_fsurdat.py +++ b/python/ctsm/test/test_unit_modify_fsurdat.py @@ -12,6 +12,7 @@ from ctsm import unit_testing from ctsm.config_utils import lon_range_0_to_360 from ctsm.modify_input_files.modify_fsurdat import ModifyFsurdat +from ctsm.test.test_unit_utils import wrong_lon_type_error_regex # Allow test names that pylint doesn't like; otherwise hard to make them # readable @@ -171,36 +172,42 @@ def test_getNotRectangle_lon1leLon2Lat1gtLat2(self): # get cols, rows also min_lon = -3 # expects min_lon < max_lon min_lat = -2 # expects min_lat < max_lat - longxy, latixy, cols, rows = self._get_longxy_latixy( - _min_lon=min_lon, _max_lon=6, _min_lat=min_lat, _max_lat=5 - ) - - # get not_rectangle from user-defined lon_1, lon_2, lat_1, lat_2 - # I have chosen the lon/lat ranges to match their corresponding index - # values to keep this simple (see usage below) - lon_1 = 0 - lon_2 = 4 # lon_1 < lon_2 - 
lat_1 = 4 - lat_2 = 0 # lat_1 > lat_2 - rectangle = ModifyFsurdat._get_rectangle( - lon_1=lon_1, - lon_2=lon_2, - lat_1=lat_1, - lat_2=lat_2, - longxy=longxy, - latixy=latixy, - ) - not_rectangle = np.logical_not(rectangle) - compare = np.ones((rows, cols)) - # assert this to confirm intuitive understanding of these matrices - self.assertEqual(np.size(not_rectangle), np.size(compare)) - # Hardwire where I expect not_rectangle to be False (0) - # I have chosen the lon/lat ranges to match their corresponding index - # values to keep this simple - compare[: lat_2 - min_lat + 1, lon_1 - min_lon : lon_2 - min_lon + 1] = 0 - compare[lat_1 - min_lat :, lon_1 - min_lon : lon_2 - min_lon + 1] = 0 - np.testing.assert_array_equal(not_rectangle, compare) + # When CTSM Issue #3001 is resolved, this assertRaisesRegex block should be deleted and the + # rest of this test uncommented + with self.assertRaisesRegex(NotImplementedError, wrong_lon_type_error_regex): + self._get_longxy_latixy(_min_lon=min_lon, _max_lon=6, _min_lat=min_lat, _max_lat=5) + + # longxy, latixy, cols, rows = self._get_longxy_latixy( + # _min_lon=min_lon, _max_lon=6, _min_lat=min_lat, _max_lat=5 + # ) + + # # get not_rectangle from user-defined lon_1, lon_2, lat_1, lat_2 + # # I have chosen the lon/lat ranges to match their corresponding index + # # values to keep this simple (see usage below) + # lon_1 = 0 + # lon_2 = 4 # lon_1 < lon_2 + # lat_1 = 4 + # lat_2 = 0 # lat_1 > lat_2 + # rectangle = ModifyFsurdat._get_rectangle( + # lon_1=lon_1, + # lon_2=lon_2, + # lat_1=lat_1, + # lat_2=lat_2, + # longxy=longxy, + # latixy=latixy, + # ) + # not_rectangle = np.logical_not(rectangle) + # compare = np.ones((rows, cols)) + # # assert this to confirm intuitive understanding of these matrices + # self.assertEqual(np.size(not_rectangle), np.size(compare)) + + # # Hardwire where I expect not_rectangle to be False (0) + # # I have chosen the lon/lat ranges to match their corresponding index + # # values to keep this 
simple + # compare[: lat_2 - min_lat + 1, lon_1 - min_lon : lon_2 - min_lon + 1] = 0 + # compare[lat_1 - min_lat :, lon_1 - min_lon : lon_2 - min_lon + 1] = 0 + # np.testing.assert_array_equal(not_rectangle, compare) def test_getNotRectangle_lon1gtLon2Lat1leLat2(self): """ @@ -261,38 +268,43 @@ def test_getNotRectangle_lon1gtLon2Lat1gtLat2(self): # get cols, rows also min_lon = -8 # expects min_lon < max_lon min_lat = -9 # expects min_lat < max_lat - longxy, latixy, cols, rows = self._get_longxy_latixy( - _min_lon=min_lon, _max_lon=5, _min_lat=min_lat, _max_lat=6 - ) - # get not_rectangle from user-defined lon_1, lon_2, lat_1, lat_2 - # I have chosen the lon/lat ranges to match their corresponding index - # values to keep this simple (see usage below) - lon_1 = -1 - lon_2 = -6 # lon_1 > lon_2 - lat_1 = 0 - lat_2 = -3 # lat_1 > lat_2 - rectangle = ModifyFsurdat._get_rectangle( - lon_1=lon_1, - lon_2=lon_2, - lat_1=lat_1, - lat_2=lat_2, - longxy=longxy, - latixy=latixy, - ) - not_rectangle = np.logical_not(rectangle) - compare = np.ones((rows, cols)) - # assert this to confirm intuitive understanding of these matrices - self.assertEqual(np.size(not_rectangle), np.size(compare)) - - # Hardwire where I expect not_rectangle to be False (0) - # I have chosen the lon/lat ranges to match their corresponding index - # values to keep this simple - compare[: lat_2 - min_lat + 1, : lon_2 - min_lon + 1] = 0 - compare[: lat_2 - min_lat + 1, lon_1 - min_lon :] = 0 - compare[lat_1 - min_lat :, : lon_2 - min_lon + 1] = 0 - compare[lat_1 - min_lat :, lon_1 - min_lon :] = 0 - np.testing.assert_array_equal(not_rectangle, compare) + # When CTSM Issue #3001 is resolved, this assertRaisesRegex block should be deleted and the + # rest of this test uncommented + with self.assertRaisesRegex(NotImplementedError, wrong_lon_type_error_regex): + self._get_longxy_latixy(_min_lon=min_lon, _max_lon=5, _min_lat=min_lat, _max_lat=6) + + # longxy, latixy, cols, rows = self._get_longxy_latixy( + # 
_min_lon=min_lon, _max_lon=5, _min_lat=min_lat, _max_lat=6 + # ) + # # get not_rectangle from user-defined lon_1, lon_2, lat_1, lat_2 + # # I have chosen the lon/lat ranges to match their corresponding index + # # values to keep this simple (see usage below) + # lon_1 = -1 + # lon_2 = -6 # lon_1 > lon_2 + # lat_1 = 0 + # lat_2 = -3 # lat_1 > lat_2 + # rectangle = ModifyFsurdat._get_rectangle( + # lon_1=lon_1, + # lon_2=lon_2, + # lat_1=lat_1, + # lat_2=lat_2, + # longxy=longxy, + # latixy=latixy, + # ) + # not_rectangle = np.logical_not(rectangle) + # compare = np.ones((rows, cols)) + # # assert this to confirm intuitive understanding of these matrices + # self.assertEqual(np.size(not_rectangle), np.size(compare)) + + # # Hardwire where I expect not_rectangle to be False (0) + # # I have chosen the lon/lat ranges to match their corresponding index + # # values to keep this simple + # compare[: lat_2 - min_lat + 1, : lon_2 - min_lon + 1] = 0 + # compare[: lat_2 - min_lat + 1, lon_1 - min_lon :] = 0 + # compare[lat_1 - min_lat :, : lon_2 - min_lon + 1] = 0 + # compare[lat_1 - min_lat :, lon_1 - min_lon :] = 0 + # np.testing.assert_array_equal(not_rectangle, compare) def test_getNotRectangle_lonsStraddle0deg(self): """ diff --git a/python/ctsm/test/test_unit_subset_data.py b/python/ctsm/test/test_unit_subset_data.py index a918fb35f0..a089a11a90 100755 --- a/python/ctsm/test/test_unit_subset_data.py +++ b/python/ctsm/test/test_unit_subset_data.py @@ -20,6 +20,7 @@ from ctsm import unit_testing from ctsm.subset_data import get_parser, setup_files, check_args from ctsm.path_utils import path_to_ctsm_root +from ctsm.test.test_unit_utils import wrong_lon_type_error_regex # pylint: disable=invalid-name @@ -235,6 +236,22 @@ def test_create_mesh_without_domain(self): ): check_args(self.args) + # When CTSM issue #2110 is resolved, this test should be removed. 
+ def test_subset_region_errors_if_datm(self): + """ + Test that you can't run subset_data for a region with --create-datm + """ + sys.argv = [ + "subset_data", + "region", + "--create-datm", + ] + self.args = self.parser.parse_args() + with self.assertRaisesRegex( + NotImplementedError, "For regional cases, you can not subset datm data" + ): + check_args(self.args) + def test_complex_option_works(self): """ Test that check_args won't flag a set of complex options that is valid @@ -252,13 +269,36 @@ def test_complex_option_works(self): "1850", "--create-mesh", "--create-domain", - "--create-datm", + # "--create-datm", # Uncomment this when CTSM issue #2110 is resolved "--verbose", "--crop", ] self.args = self.parser.parse_args() check_args(self.args) + # When CTSM issue #3001 is fixed, this test should be replaced with one that checks for correct + # conversion of longitudes specified in the [-180, 180) format. + def test_negative_lon_errors(self): + """ + Test that a negative longitude results in a descriptive error + """ + sys.argv = [ + "subset_data", + "region", + "--create-domain", + "--verbose", + "--lat1", + "0", + "--lat2", + "40", + "--lon1", + "-20", + "--lon2", + "40", + ] + with self.assertRaisesRegex(NotImplementedError, wrong_lon_type_error_regex): + self.args = self.parser.parse_args() + if __name__ == "__main__": unit_testing.setup_for_tests() diff --git a/python/ctsm/test/test_unit_utils.py b/python/ctsm/test/test_unit_utils.py index aed43cfede..4ed8019792 100755 --- a/python/ctsm/test/test_unit_utils.py +++ b/python/ctsm/test/test_unit_utils.py @@ -16,6 +16,9 @@ # to make readable unit test names # pylint: disable=invalid-name +# When CTSM Issue #3001 is resolved, this should be deleted +wrong_lon_type_error_regex = r"\[-180, 0\).*\[0, 360\)" + class TestUtilsFillTemplateFile(unittest.TestCase): """Tests of utils: fill_template_file""" @@ -64,16 +67,28 @@ def test_lonRange0To360_lonIsNeg180(self): Tests that negative inputs to lon_range_0_to_360 
get 360 added to them """ inval = -180 - result = lon_range_0_to_360(inval) - self.assertEqual(result, inval + 360) + + # When CTSM Issue #3001 is resolved, this assertRaisesRegex block should be deleted and the + # rest of this test uncommented + with self.assertRaisesRegex(NotImplementedError, wrong_lon_type_error_regex): + lon_range_0_to_360(inval) + + # result = lon_range_0_to_360(inval) + # self.assertEqual(result, inval + 360) def test_lonRange0To360_lonIsNegGreaterThan1(self): """ Tests that negative inputs to lon_range_0_to_360 get 360 added to them """ inval = -0.001 - result = lon_range_0_to_360(inval) - self.assertEqual(result, inval + 360) + + # When CTSM Issue #3001 is resolved, this assertRaisesRegex block should be deleted and the + # rest of this test uncommented + with self.assertRaisesRegex(NotImplementedError, wrong_lon_type_error_regex): + lon_range_0_to_360(inval) + + # result = lon_range_0_to_360(inval) + # self.assertEqual(result, inval + 360) def test_lonRange0To360_lonIs0(self): """