Python code snippets (scraped): pandas MultiIndex indexing tests (test_indexing.py) and PyTorch generator networks (networks.py)
test_indexing.py
Source:test_indexing.py
1from datetime import timedelta2import numpy as np3import pytest4from pandas.errors import InvalidIndexError5import pandas as pd6from pandas import Categorical, Index, MultiIndex, date_range7import pandas._testing as tm8class TestSliceLocs:9 def test_slice_locs_partial(self, idx):10 sorted_idx, _ = idx.sortlevel(0)11 result = sorted_idx.slice_locs(("foo", "two"), ("qux", "one"))12 assert result == (1, 5)13 result = sorted_idx.slice_locs(None, ("qux", "one"))14 assert result == (0, 5)15 result = sorted_idx.slice_locs(("foo", "two"), None)16 assert result == (1, len(sorted_idx))17 result = sorted_idx.slice_locs("bar", "baz")18 assert result == (2, 4)19 def test_slice_locs(self):20 df = tm.makeTimeDataFrame()21 stacked = df.stack()22 idx = stacked.index23 slob = slice(*idx.slice_locs(df.index[5], df.index[15]))24 sliced = stacked[slob]25 expected = df[5:16].stack()26 tm.assert_almost_equal(sliced.values, expected.values)27 slob = slice(28 *idx.slice_locs(29 df.index[5] + timedelta(seconds=30),30 df.index[15] - timedelta(seconds=30),31 )32 )33 sliced = stacked[slob]34 expected = df[6:15].stack()35 tm.assert_almost_equal(sliced.values, expected.values)36 def test_slice_locs_with_type_mismatch(self):37 df = tm.makeTimeDataFrame()38 stacked = df.stack()39 idx = stacked.index40 with pytest.raises(TypeError, match="^Level type mismatch"):41 idx.slice_locs((1, 3))42 with pytest.raises(TypeError, match="^Level type mismatch"):43 idx.slice_locs(df.index[5] + timedelta(seconds=30), (5, 2))44 df = tm.makeCustomDataframe(5, 5)45 stacked = df.stack()46 idx = stacked.index47 with pytest.raises(TypeError, match="^Level type mismatch"):48 idx.slice_locs(timedelta(seconds=30))49 # TODO: Try creating a UnicodeDecodeError in exception message50 with pytest.raises(TypeError, match="^Level type mismatch"):51 idx.slice_locs(df.index[1], (16, "a"))52 def test_slice_locs_not_sorted(self):53 index = MultiIndex(54 levels=[Index(np.arange(4)), Index(np.arange(4)), Index(np.arange(4))],55 
codes=[56 np.array([0, 0, 1, 2, 2, 2, 3, 3]),57 np.array([0, 1, 0, 0, 0, 1, 0, 1]),58 np.array([1, 0, 1, 1, 0, 0, 1, 0]),59 ],60 )61 msg = "[Kk]ey length.*greater than MultiIndex lexsort depth"62 with pytest.raises(KeyError, match=msg):63 index.slice_locs((1, 0, 1), (2, 1, 0))64 # works65 sorted_index, _ = index.sortlevel(0)66 # should there be a test case here???67 sorted_index.slice_locs((1, 0, 1), (2, 1, 0))68 def test_slice_locs_not_contained(self):69 # some searchsorted action70 index = MultiIndex(71 levels=[[0, 2, 4, 6], [0, 2, 4]],72 codes=[[0, 0, 0, 1, 1, 2, 3, 3, 3], [0, 1, 2, 1, 2, 2, 0, 1, 2]],73 )74 result = index.slice_locs((1, 0), (5, 2))75 assert result == (3, 6)76 result = index.slice_locs(1, 5)77 assert result == (3, 6)78 result = index.slice_locs((2, 2), (5, 2))79 assert result == (3, 6)80 result = index.slice_locs(2, 5)81 assert result == (3, 6)82 result = index.slice_locs((1, 0), (6, 3))83 assert result == (3, 8)84 result = index.slice_locs(-1, 10)85 assert result == (0, len(index))86 @pytest.mark.parametrize(87 "index_arr,expected,start_idx,end_idx",88 [89 ([[np.nan, "a", "b"], ["c", "d", "e"]], (0, 3), np.nan, None),90 ([[np.nan, "a", "b"], ["c", "d", "e"]], (0, 3), np.nan, "b"),91 ([[np.nan, "a", "b"], ["c", "d", "e"]], (0, 3), np.nan, ("b", "e")),92 ([["a", "b", "c"], ["d", np.nan, "e"]], (1, 3), ("b", np.nan), None),93 ([["a", "b", "c"], ["d", np.nan, "e"]], (1, 3), ("b", np.nan), "c"),94 ([["a", "b", "c"], ["d", np.nan, "e"]], (1, 3), ("b", np.nan), ("c", "e")),95 ],96 )97 def test_slice_locs_with_missing_value(98 self, index_arr, expected, start_idx, end_idx99 ):100 # issue 19132101 idx = MultiIndex.from_arrays(index_arr)102 result = idx.slice_locs(start=start_idx, end=end_idx)103 assert result == expected104def test_putmask_with_wrong_mask(idx):105 # GH18368106 msg = "putmask: mask and data must be the same size"107 with pytest.raises(ValueError, match=msg):108 idx.putmask(np.ones(len(idx) + 1, np.bool_), 1)109 with 
pytest.raises(ValueError, match=msg):110 idx.putmask(np.ones(len(idx) - 1, np.bool_), 1)111 with pytest.raises(ValueError, match=msg):112 idx.putmask("foo", 1)113class TestGetIndexer:114 def test_get_indexer(self):115 major_axis = Index(np.arange(4))116 minor_axis = Index(np.arange(2))117 major_codes = np.array([0, 0, 1, 2, 2, 3, 3], dtype=np.intp)118 minor_codes = np.array([0, 1, 0, 0, 1, 0, 1], dtype=np.intp)119 index = MultiIndex(120 levels=[major_axis, minor_axis], codes=[major_codes, minor_codes]121 )122 idx1 = index[:5]123 idx2 = index[[1, 3, 5]]124 r1 = idx1.get_indexer(idx2)125 tm.assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp))126 r1 = idx2.get_indexer(idx1, method="pad")127 e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp)128 tm.assert_almost_equal(r1, e1)129 r2 = idx2.get_indexer(idx1[::-1], method="pad")130 tm.assert_almost_equal(r2, e1[::-1])131 rffill1 = idx2.get_indexer(idx1, method="ffill")132 tm.assert_almost_equal(r1, rffill1)133 r1 = idx2.get_indexer(idx1, method="backfill")134 e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp)135 tm.assert_almost_equal(r1, e1)136 r2 = idx2.get_indexer(idx1[::-1], method="backfill")137 tm.assert_almost_equal(r2, e1[::-1])138 rbfill1 = idx2.get_indexer(idx1, method="bfill")139 tm.assert_almost_equal(r1, rbfill1)140 # pass non-MultiIndex141 r1 = idx1.get_indexer(idx2.values)142 rexp1 = idx1.get_indexer(idx2)143 tm.assert_almost_equal(r1, rexp1)144 r1 = idx1.get_indexer([1, 2, 3])145 assert (r1 == [-1, -1, -1]).all()146 # create index with duplicates147 idx1 = Index(list(range(10)) + list(range(10)))148 idx2 = Index(list(range(20)))149 msg = "Reindexing only valid with uniquely valued Index objects"150 with pytest.raises(InvalidIndexError, match=msg):151 idx1.get_indexer(idx2)152 def test_get_indexer_nearest(self):153 midx = MultiIndex.from_tuples([("a", 1), ("b", 2)])154 msg = (155 "method='nearest' not implemented yet for MultiIndex; "156 "see GitHub issue 9365"157 )158 with pytest.raises(NotImplementedError, 
match=msg):159 midx.get_indexer(["a"], method="nearest")160 msg = "tolerance not implemented yet for MultiIndex"161 with pytest.raises(NotImplementedError, match=msg):162 midx.get_indexer(["a"], method="pad", tolerance=2)163 def test_get_indexer_categorical_time(self):164 # https://github.com/pandas-dev/pandas/issues/21390165 midx = MultiIndex.from_product(166 [167 Categorical(["a", "b", "c"]),168 Categorical(date_range("2012-01-01", periods=3, freq="H")),169 ]170 )171 result = midx.get_indexer(midx)172 tm.assert_numpy_array_equal(result, np.arange(9, dtype=np.intp))173 @pytest.mark.parametrize(174 "index_arr,labels,expected",175 [176 (177 [[1, np.nan, 2], [3, 4, 5]],178 [1, np.nan, 2],179 np.array([-1, -1, -1], dtype=np.intp),180 ),181 ([[1, np.nan, 2], [3, 4, 5]], [(np.nan, 4)], np.array([1], dtype=np.intp)),182 ([[1, 2, 3], [np.nan, 4, 5]], [(1, np.nan)], np.array([0], dtype=np.intp)),183 (184 [[1, 2, 3], [np.nan, 4, 5]],185 [np.nan, 4, 5],186 np.array([-1, -1, -1], dtype=np.intp),187 ),188 ],189 )190 def test_get_indexer_with_missing_value(self, index_arr, labels, expected):191 # issue 19132192 idx = MultiIndex.from_arrays(index_arr)193 result = idx.get_indexer(labels)194 tm.assert_numpy_array_equal(result, expected)195 def test_get_indexer_methods(self):196 # https://github.com/pandas-dev/pandas/issues/29896197 # test getting an indexer for another index with different methods198 # confirms that getting an indexer without a filling method, getting an199 # indexer and backfilling, and getting an indexer and padding all behave200 # correctly in the case where all of the target values fall in between201 # several levels in the MultiIndex into which they are getting an indexer202 #203 # visually, the MultiIndexes used in this test are:204 # mult_idx_1:205 # 0: -1 0206 # 1: 2207 # 2: 3208 # 3: 4209 # 4: 0 0210 # 5: 2211 # 6: 3212 # 7: 4213 # 8: 1 0214 # 9: 2215 # 10: 3216 # 11: 4217 #218 # mult_idx_2:219 # 0: 0 1220 # 1: 3221 # 2: 4222 mult_idx_1 = 
MultiIndex.from_product([[-1, 0, 1], [0, 2, 3, 4]])223 mult_idx_2 = MultiIndex.from_product([[0], [1, 3, 4]])224 indexer = mult_idx_1.get_indexer(mult_idx_2)225 expected = np.array([-1, 6, 7], dtype=indexer.dtype)226 tm.assert_almost_equal(expected, indexer)227 backfill_indexer = mult_idx_1.get_indexer(mult_idx_2, method="backfill")228 expected = np.array([5, 6, 7], dtype=backfill_indexer.dtype)229 tm.assert_almost_equal(expected, backfill_indexer)230 # ensure the legacy "bfill" option functions identically to "backfill"231 backfill_indexer = mult_idx_1.get_indexer(mult_idx_2, method="bfill")232 expected = np.array([5, 6, 7], dtype=backfill_indexer.dtype)233 tm.assert_almost_equal(expected, backfill_indexer)234 pad_indexer = mult_idx_1.get_indexer(mult_idx_2, method="pad")235 expected = np.array([4, 6, 7], dtype=pad_indexer.dtype)236 tm.assert_almost_equal(expected, pad_indexer)237 # ensure the legacy "ffill" option functions identically to "pad"238 pad_indexer = mult_idx_1.get_indexer(mult_idx_2, method="ffill")239 expected = np.array([4, 6, 7], dtype=pad_indexer.dtype)240 tm.assert_almost_equal(expected, pad_indexer)241 def test_get_indexer_three_or_more_levels(self):242 # https://github.com/pandas-dev/pandas/issues/29896243 # tests get_indexer() on MultiIndexes with 3+ levels244 # visually, these are245 # mult_idx_1:246 # 0: 1 2 5247 # 1: 7248 # 2: 4 5249 # 3: 7250 # 4: 6 5251 # 5: 7252 # 6: 3 2 5253 # 7: 7254 # 8: 4 5255 # 9: 7256 # 10: 6 5257 # 11: 7258 #259 # mult_idx_2:260 # 0: 1 1 8261 # 1: 1 5 9262 # 2: 1 6 7263 # 3: 2 1 6264 # 4: 2 7 6265 # 5: 2 7 8266 # 6: 3 6 8267 mult_idx_1 = pd.MultiIndex.from_product([[1, 3], [2, 4, 6], [5, 7]])268 mult_idx_2 = pd.MultiIndex.from_tuples(269 [270 (1, 1, 8),271 (1, 5, 9),272 (1, 6, 7),273 (2, 1, 6),274 (2, 7, 7),275 (2, 7, 8),276 (3, 6, 8),277 ]278 )279 # sanity check280 assert mult_idx_1.is_monotonic281 assert mult_idx_1.is_unique282 assert mult_idx_2.is_monotonic283 assert mult_idx_2.is_unique284 # show the 
relationships between the two285 assert mult_idx_2[0] < mult_idx_1[0]286 assert mult_idx_1[3] < mult_idx_2[1] < mult_idx_1[4]287 assert mult_idx_1[5] == mult_idx_2[2]288 assert mult_idx_1[5] < mult_idx_2[3] < mult_idx_1[6]289 assert mult_idx_1[5] < mult_idx_2[4] < mult_idx_1[6]290 assert mult_idx_1[5] < mult_idx_2[5] < mult_idx_1[6]291 assert mult_idx_1[-1] < mult_idx_2[6]292 indexer_no_fill = mult_idx_1.get_indexer(mult_idx_2)293 expected = np.array([-1, -1, 5, -1, -1, -1, -1], dtype=indexer_no_fill.dtype)294 tm.assert_almost_equal(expected, indexer_no_fill)295 # test with backfilling296 indexer_backfilled = mult_idx_1.get_indexer(mult_idx_2, method="backfill")297 expected = np.array([0, 4, 5, 6, 6, 6, -1], dtype=indexer_backfilled.dtype)298 tm.assert_almost_equal(expected, indexer_backfilled)299 # now, the same thing, but forward-filled (aka "padded")300 indexer_padded = mult_idx_1.get_indexer(mult_idx_2, method="pad")301 expected = np.array([-1, 3, 5, 5, 5, 5, 11], dtype=indexer_padded.dtype)302 tm.assert_almost_equal(expected, indexer_padded)303 # now, do the indexing in the other direction304 assert mult_idx_2[0] < mult_idx_1[0] < mult_idx_2[1]305 assert mult_idx_2[0] < mult_idx_1[1] < mult_idx_2[1]306 assert mult_idx_2[0] < mult_idx_1[2] < mult_idx_2[1]307 assert mult_idx_2[0] < mult_idx_1[3] < mult_idx_2[1]308 assert mult_idx_2[1] < mult_idx_1[4] < mult_idx_2[2]309 assert mult_idx_2[2] == mult_idx_1[5]310 assert mult_idx_2[5] < mult_idx_1[6] < mult_idx_2[6]311 assert mult_idx_2[5] < mult_idx_1[7] < mult_idx_2[6]312 assert mult_idx_2[5] < mult_idx_1[8] < mult_idx_2[6]313 assert mult_idx_2[5] < mult_idx_1[9] < mult_idx_2[6]314 assert mult_idx_2[5] < mult_idx_1[10] < mult_idx_2[6]315 assert mult_idx_2[5] < mult_idx_1[11] < mult_idx_2[6]316 indexer = mult_idx_2.get_indexer(mult_idx_1)317 expected = np.array(318 [-1, -1, -1, -1, -1, 2, -1, -1, -1, -1, -1, -1], dtype=indexer.dtype319 )320 tm.assert_almost_equal(expected, indexer)321 backfill_indexer = 
mult_idx_2.get_indexer(mult_idx_1, method="bfill")322 expected = np.array(323 [1, 1, 1, 1, 2, 2, 6, 6, 6, 6, 6, 6], dtype=backfill_indexer.dtype324 )325 tm.assert_almost_equal(expected, backfill_indexer)326 pad_indexer = mult_idx_2.get_indexer(mult_idx_1, method="pad")327 expected = np.array(328 [0, 0, 0, 0, 1, 2, 5, 5, 5, 5, 5, 5], dtype=pad_indexer.dtype329 )330 tm.assert_almost_equal(expected, pad_indexer)331 def test_get_indexer_crossing_levels(self):332 # https://github.com/pandas-dev/pandas/issues/29896333 # tests a corner case with get_indexer() with MultiIndexes where, when we334 # need to "carry" across levels, proper tuple ordering is respected335 #336 # the MultiIndexes used in this test, visually, are:337 # mult_idx_1:338 # 0: 1 1 1 1339 # 1: 2340 # 2: 2 1341 # 3: 2342 # 4: 1 2 1 1343 # 5: 2344 # 6: 2 1345 # 7: 2346 # 8: 2 1 1 1347 # 9: 2348 # 10: 2 1349 # 11: 2350 # 12: 2 2 1 1351 # 13: 2352 # 14: 2 1353 # 15: 2354 #355 # mult_idx_2:356 # 0: 1 3 2 2357 # 1: 2 3 2 2358 mult_idx_1 = pd.MultiIndex.from_product([[1, 2]] * 4)359 mult_idx_2 = pd.MultiIndex.from_tuples([(1, 3, 2, 2), (2, 3, 2, 2)])360 # show the tuple orderings, which get_indexer() should respect361 assert mult_idx_1[7] < mult_idx_2[0] < mult_idx_1[8]362 assert mult_idx_1[-1] < mult_idx_2[1]363 indexer = mult_idx_1.get_indexer(mult_idx_2)364 expected = np.array([-1, -1], dtype=indexer.dtype)365 tm.assert_almost_equal(expected, indexer)366 backfill_indexer = mult_idx_1.get_indexer(mult_idx_2, method="bfill")367 expected = np.array([8, -1], dtype=backfill_indexer.dtype)368 tm.assert_almost_equal(expected, backfill_indexer)369 pad_indexer = mult_idx_1.get_indexer(mult_idx_2, method="ffill")370 expected = np.array([7, 15], dtype=pad_indexer.dtype)371 tm.assert_almost_equal(expected, pad_indexer)372def test_getitem(idx):373 # scalar374 assert idx[2] == ("bar", "one")375 # slice376 result = idx[2:5]377 expected = idx[[2, 3, 4]]378 assert result.equals(expected)379 # boolean380 result = idx[[True, 
False, True, False, True, True]]381 result2 = idx[np.array([True, False, True, False, True, True])]382 expected = idx[[0, 2, 4, 5]]383 assert result.equals(expected)384 assert result2.equals(expected)385def test_getitem_group_select(idx):386 sorted_idx, _ = idx.sortlevel(0)387 assert sorted_idx.get_loc("baz") == slice(3, 4)388 assert sorted_idx.get_loc("foo") == slice(0, 2)389@pytest.mark.parametrize("ind1", [[True] * 5, pd.Index([True] * 5)])390@pytest.mark.parametrize(391 "ind2",392 [[True, False, True, False, False], pd.Index([True, False, True, False, False])],393)394def test_getitem_bool_index_all(ind1, ind2):395 # GH#22533396 idx = MultiIndex.from_tuples([(10, 1), (20, 2), (30, 3), (40, 4), (50, 5)])397 tm.assert_index_equal(idx[ind1], idx)398 expected = MultiIndex.from_tuples([(10, 1), (30, 3)])399 tm.assert_index_equal(idx[ind2], expected)400@pytest.mark.parametrize("ind1", [[True], pd.Index([True])])401@pytest.mark.parametrize("ind2", [[False], pd.Index([False])])402def test_getitem_bool_index_single(ind1, ind2):403 # GH#22533404 idx = MultiIndex.from_tuples([(10, 1)])405 tm.assert_index_equal(idx[ind1], idx)406 expected = pd.MultiIndex(407 levels=[np.array([], dtype=np.int64), np.array([], dtype=np.int64)],408 codes=[[], []],409 )410 tm.assert_index_equal(idx[ind2], expected)411class TestGetLoc:412 def test_get_loc(self, idx):413 assert idx.get_loc(("foo", "two")) == 1414 assert idx.get_loc(("baz", "two")) == 3415 with pytest.raises(KeyError, match=r"^10$"):416 idx.get_loc(("bar", "two"))417 with pytest.raises(KeyError, match=r"^'quux'$"):418 idx.get_loc("quux")419 msg = "only the default get_loc method is currently supported for MultiIndex"420 with pytest.raises(NotImplementedError, match=msg):421 idx.get_loc("foo", method="nearest")422 # 3 levels423 index = MultiIndex(424 levels=[Index(np.arange(4)), Index(np.arange(4)), Index(np.arange(4))],425 codes=[426 np.array([0, 0, 1, 2, 2, 2, 3, 3]),427 np.array([0, 1, 0, 0, 0, 1, 0, 1]),428 np.array([1, 0, 1, 
1, 0, 0, 1, 0]),429 ],430 )431 with pytest.raises(KeyError, match=r"^\(1, 1\)$"):432 index.get_loc((1, 1))433 assert index.get_loc((2, 0)) == slice(3, 5)434 def test_get_loc_duplicates(self):435 index = Index([2, 2, 2, 2])436 result = index.get_loc(2)437 expected = slice(0, 4)438 assert result == expected439 index = Index(["c", "a", "a", "b", "b"])440 rs = index.get_loc("c")441 xp = 0442 assert rs == xp443 with pytest.raises(KeyError):444 index.get_loc(2)445 def test_get_loc_level(self):446 index = MultiIndex(447 levels=[Index(np.arange(4)), Index(np.arange(4)), Index(np.arange(4))],448 codes=[449 np.array([0, 0, 1, 2, 2, 2, 3, 3]),450 np.array([0, 1, 0, 0, 0, 1, 0, 1]),451 np.array([1, 0, 1, 1, 0, 0, 1, 0]),452 ],453 )454 loc, new_index = index.get_loc_level((0, 1))455 expected = slice(1, 2)456 exp_index = index[expected].droplevel(0).droplevel(0)457 assert loc == expected458 assert new_index.equals(exp_index)459 loc, new_index = index.get_loc_level((0, 1, 0))460 expected = 1461 assert loc == expected462 assert new_index is None463 with pytest.raises(KeyError, match=r"^\(2, 2\)$"):464 index.get_loc_level((2, 2))465 # GH 22221: unused label466 with pytest.raises(KeyError, match=r"^2$"):467 index.drop(2).get_loc_level(2)468 # Unused label on unsorted level:469 with pytest.raises(KeyError, match=r"^2$"):470 index.drop(1, level=2).get_loc_level(2, level=2)471 index = MultiIndex(472 levels=[[2000], list(range(4))],473 codes=[np.array([0, 0, 0, 0]), np.array([0, 1, 2, 3])],474 )475 result, new_index = index.get_loc_level((2000, slice(None, None)))476 expected = slice(None, None)477 assert result == expected478 assert new_index.equals(index.droplevel(0))479 @pytest.mark.parametrize("dtype1", [int, float, bool, str])480 @pytest.mark.parametrize("dtype2", [int, float, bool, str])481 def test_get_loc_multiple_dtypes(self, dtype1, dtype2):482 # GH 18520483 levels = [np.array([0, 1]).astype(dtype1), np.array([0, 1]).astype(dtype2)]484 idx = 
pd.MultiIndex.from_product(levels)485 assert idx.get_loc(idx[2]) == 2486 @pytest.mark.parametrize("level", [0, 1])487 @pytest.mark.parametrize("dtypes", [[int, float], [float, int]])488 def test_get_loc_implicit_cast(self, level, dtypes):489 # GH 18818, GH 15994 : as flat index, cast int to float and vice-versa490 levels = [["a", "b"], ["c", "d"]]491 key = ["b", "d"]492 lev_dtype, key_dtype = dtypes493 levels[level] = np.array([0, 1], dtype=lev_dtype)494 key[level] = key_dtype(1)495 idx = MultiIndex.from_product(levels)496 assert idx.get_loc(tuple(key)) == 3497 def test_get_loc_cast_bool(self):498 # GH 19086 : int is casted to bool, but not vice-versa499 levels = [[False, True], np.arange(2, dtype="int64")]500 idx = MultiIndex.from_product(levels)501 assert idx.get_loc((0, 1)) == 1502 assert idx.get_loc((1, 0)) == 2503 with pytest.raises(KeyError, match=r"^\(False, True\)$"):504 idx.get_loc((False, True))505 with pytest.raises(KeyError, match=r"^\(True, False\)$"):506 idx.get_loc((True, False))507 @pytest.mark.parametrize("level", [0, 1])508 def test_get_loc_nan(self, level, nulls_fixture):509 # GH 18485 : NaN in MultiIndex510 levels = [["a", "b"], ["c", "d"]]511 key = ["b", "d"]512 levels[level] = np.array([0, nulls_fixture], dtype=type(nulls_fixture))513 key[level] = nulls_fixture514 idx = MultiIndex.from_product(levels)515 assert idx.get_loc(tuple(key)) == 3516 def test_get_loc_missing_nan(self):517 # GH 8569518 idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]])519 assert isinstance(idx.get_loc(1), slice)520 with pytest.raises(KeyError, match=r"^3$"):521 idx.get_loc(3)522 with pytest.raises(KeyError, match=r"^nan$"):523 idx.get_loc(np.nan)524 with pytest.raises(TypeError, match="unhashable type: 'list'"):525 # listlike/non-hashable raises TypeError526 idx.get_loc([np.nan])527 def test_get_loc_with_values_including_missing_values(self):528 # issue 19132529 idx = MultiIndex.from_product([[np.nan, 1]] * 2)530 expected = slice(0, 2, None)531 assert 
idx.get_loc(np.nan) == expected532 idx = MultiIndex.from_arrays([[np.nan, 1, 2, np.nan]])533 expected = np.array([True, False, False, True])534 tm.assert_numpy_array_equal(idx.get_loc(np.nan), expected)535 idx = MultiIndex.from_product([[np.nan, 1]] * 3)536 expected = slice(2, 4, None)537 assert idx.get_loc((np.nan, 1)) == expected538 def test_get_loc_duplicates2(self):539 # TODO: de-duplicate with test_get_loc_duplicates above?540 index = MultiIndex(541 levels=[["D", "B", "C"], [0, 26, 27, 37, 57, 67, 75, 82]],542 codes=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2], [1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],543 names=["tag", "day"],544 )545 assert index.get_loc("D") == slice(0, 3)546class TestWhere:547 def test_where(self):548 i = MultiIndex.from_tuples([("A", 1), ("A", 2)])549 msg = r"\.where is not supported for MultiIndex operations"550 with pytest.raises(NotImplementedError, match=msg):551 i.where(True)552 @pytest.mark.parametrize("klass", [list, tuple, np.array, pd.Series])553 def test_where_array_like(self, klass):554 i = MultiIndex.from_tuples([("A", 1), ("A", 2)])555 cond = [False, True]556 msg = r"\.where is not supported for MultiIndex operations"557 with pytest.raises(NotImplementedError, match=msg):558 i.where(klass(cond))559class TestContains:560 def test_contains_top_level(self):561 midx = MultiIndex.from_product([["A", "B"], [1, 2]])562 assert "A" in midx563 assert "A" not in midx._engine564 def test_contains_with_nat(self):565 # MI with a NaT566 mi = MultiIndex(567 levels=[["C"], pd.date_range("2012-01-01", periods=5)],568 codes=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],569 names=[None, "B"],570 )571 assert ("C", pd.Timestamp("2012-01-01")) in mi572 for val in mi.values:573 assert val in mi574 def test_contains(self, idx):575 assert ("foo", "two") in idx576 assert ("bar", "two") not in idx577 assert None not in idx578 def test_contains_with_missing_value(self):579 # GH#19132580 idx = MultiIndex.from_arrays([[1, np.nan, 2]])581 assert np.nan in idx582 idx = 
MultiIndex.from_arrays([[1, 2], [np.nan, 3]])583 assert np.nan not in idx584 assert (1, np.nan) in idx585 def test_multiindex_contains_dropped(self):586 # GH#19027587 # test that dropped MultiIndex levels are not in the MultiIndex588 # despite continuing to be in the MultiIndex's levels589 idx = MultiIndex.from_product([[1, 2], [3, 4]])590 assert 2 in idx591 idx = idx.drop(2)592 # drop implementation keeps 2 in the levels593 assert 2 in idx.levels[0]594 # but it should no longer be in the index itself595 assert 2 not in idx596 # also applies to strings597 idx = MultiIndex.from_product([["a", "b"], ["c", "d"]])598 assert "a" in idx599 idx = idx.drop("a")600 assert "a" in idx.levels[0]601 assert "a" not in idx602 def test_contains_td64_level(self):603 # GH#24570604 tx = pd.timedelta_range("09:30:00", "16:00:00", freq="30 min")605 idx = MultiIndex.from_arrays([tx, np.arange(len(tx))])606 assert tx[0] in idx607 assert "element_not_exit" not in idx608 assert "0 day 09:30:00" in idx609 @pytest.mark.slow610 def test_large_mi_contains(self):611 # GH#10645612 result = MultiIndex.from_arrays([range(10 ** 6), range(10 ** 6)])613 assert not (10 ** 6, 0) in result614def test_timestamp_multiindex_indexer():615 # https://github.com/pandas-dev/pandas/issues/26944616 idx = pd.MultiIndex.from_product(617 [618 pd.date_range("2019-01-01T00:15:33", periods=100, freq="H", name="date"),619 ["x"],620 [3],621 ]622 )623 df = pd.DataFrame({"foo": np.arange(len(idx))}, idx)624 result = df.loc[pd.IndexSlice["2019-1-2":, "x", :], "foo"]625 qidx = pd.MultiIndex.from_product(626 [627 pd.date_range(628 start="2019-01-02T00:15:33",629 end="2019-01-05T02:15:33",630 freq="H",631 name="date",632 ),633 ["x"],634 [3],635 ]636 )637 should_be = pd.Series(data=np.arange(24, len(qidx) + 24), index=qidx, name="foo")638 tm.assert_series_equal(result, should_be)639@pytest.mark.parametrize(640 "index_arr,expected,target,algo",641 [642 ([[np.nan, "a", "b"], ["c", "d", "e"]], 0, np.nan, "left"),643 ([[np.nan, 
"a", "b"], ["c", "d", "e"]], 1, (np.nan, "c"), "right"),644 ([["a", "b", "c"], ["d", np.nan, "d"]], 1, ("b", np.nan), "left"),645 ],646)647def test_get_slice_bound_with_missing_value(index_arr, expected, target, algo):648 # issue 19132649 idx = MultiIndex.from_arrays(index_arr)650 result = idx.get_slice_bound(target, side=algo, kind="loc")651 assert result == expected652@pytest.mark.parametrize(653 "index_arr,expected,start_idx,end_idx",654 [655 ([[np.nan, 1, 2], [3, 4, 5]], slice(0, 2, None), np.nan, 1),656 ([[np.nan, 1, 2], [3, 4, 5]], slice(0, 3, None), np.nan, (2, 5)),657 ([[1, 2, 3], [4, np.nan, 5]], slice(1, 3, None), (2, np.nan), 3),658 ([[1, 2, 3], [4, np.nan, 5]], slice(1, 3, None), (2, np.nan), (3, 5)),659 ],660)661def test_slice_indexer_with_missing_value(index_arr, expected, start_idx, end_idx):662 # issue 19132663 idx = MultiIndex.from_arrays(index_arr)664 result = idx.slice_indexer(start=start_idx, end=end_idx)665 assert result == expected666def test_pyint_engine():667 # GH#18519 : when combinations of codes cannot be represented in 64668 # bits, the index underlying the MultiIndex engine works with Python669 # integers, rather than uint64.670 N = 5671 keys = [672 tuple(l)673 for l in [674 [0] * 10 * N,675 [1] * 10 * N,676 [2] * 10 * N,677 [np.nan] * N + [2] * 9 * N,678 [0] * N + [2] * 9 * N,679 [np.nan] * N + [2] * 8 * N + [0] * N,680 ]681 ]682 # Each level contains 4 elements (including NaN), so it is represented683 # in 2 bits, for a total of 2*N*10 = 100 > 64 bits. 
If we were using a684 # 64 bit engine and truncating the first levels, the fourth and fifth685 # keys would collide; if truncating the last levels, the fifth and686 # sixth; if rotating bits rather than shifting, the third and fifth.687 for idx in range(len(keys)):688 index = MultiIndex.from_tuples(keys)689 assert index.get_loc(keys[idx]) == idx690 expected = np.arange(idx + 1, dtype=np.intp)691 result = index.get_indexer([keys[i] for i in expected])692 tm.assert_numpy_array_equal(result, expected)693 # With missing key:694 idces = range(len(keys))695 expected = np.array([-1] + list(idces), dtype=np.intp)696 missing = tuple([0, 1] * 5 * N)697 result = index.get_indexer([missing] + [keys[i] for i in idces])...
networks.py
Source:networks.py
1# Copyright (c) Microsoft Corporation.2# Licensed under the MIT License.3import torch4import torch.nn as nn5import functools6from torch.autograd import Variable7import numpy as np8from torch.nn.utils import spectral_norm9# from util.util import SwitchNorm2d10import torch.nn.functional as F11###############################################################################12# Functions13###############################################################################14def weights_init(m):15 classname = m.__class__.__name__16 if classname.find("Conv") != -1:17 m.weight.data.normal_(0.0, 0.02)18 elif classname.find("BatchNorm2d") != -1:19 m.weight.data.normal_(1.0, 0.02)20 m.bias.data.fill_(0)21def get_norm_layer(norm_type="instance"):22 if norm_type == "batch":23 norm_layer = functools.partial(nn.BatchNorm2d, affine=True)24 elif norm_type == "instance":25 norm_layer = functools.partial(nn.InstanceNorm2d, affine=False)26 elif norm_type == "spectral":27 norm_layer = spectral_norm()28 elif norm_type == "SwitchNorm":29 norm_layer = SwitchNorm2d30 else:31 raise NotImplementedError("normalization layer [%s] is not found" % norm_type)32 return norm_layer33def print_network(net):34 if isinstance(net, list):35 net = net[0]36 num_params = 037 for param in net.parameters():38 num_params += param.numel()39 print(net)40 print("Total number of parameters: %d" % num_params)41class GlobalGenerator_DCDCv2(nn.Module):42 def __init__(43 self,44 input_nc,45 output_nc,46 ngf=64,47 k_size=3,48 n_downsampling=8,49 norm_layer=nn.BatchNorm2d,50 padding_type="reflect",51 opt=None,52 ):53 super(GlobalGenerator_DCDCv2, self).__init__()54 activation = nn.ReLU(True)55 model = [56 nn.ReflectionPad2d(3),57 nn.Conv2d(input_nc, min(ngf, opt.mc), kernel_size=7, padding=0),58 norm_layer(ngf),59 activation,60 ]61 ### downsample62 for i in range(opt.start_r):63 mult = 2 ** i64 model += [65 nn.Conv2d(66 min(ngf * mult, opt.mc),67 min(ngf * mult * 2, opt.mc),68 kernel_size=k_size,69 stride=2,70 padding=1,71 
),72 norm_layer(min(ngf * mult * 2, opt.mc)),73 activation,74 ]75 for i in range(opt.start_r, n_downsampling - 1):76 mult = 2 ** i77 model += [78 nn.Conv2d(79 min(ngf * mult, opt.mc),80 min(ngf * mult * 2, opt.mc),81 kernel_size=k_size,82 stride=2,83 padding=1,84 ),85 norm_layer(min(ngf * mult * 2, opt.mc)),86 activation,87 ]88 model += [89 ResnetBlock(90 min(ngf * mult * 2, opt.mc),91 padding_type=padding_type,92 activation=activation,93 norm_layer=norm_layer,94 opt=opt,95 )96 ]97 model += [98 ResnetBlock(99 min(ngf * mult * 2, opt.mc),100 padding_type=padding_type,101 activation=activation,102 norm_layer=norm_layer,103 opt=opt,104 )105 ]106 mult = 2 ** (n_downsampling - 1)107 if opt.spatio_size == 32:108 model += [109 nn.Conv2d(110 min(ngf * mult, opt.mc),111 min(ngf * mult * 2, opt.mc),112 kernel_size=k_size,113 stride=2,114 padding=1,115 ),116 norm_layer(min(ngf * mult * 2, opt.mc)),117 activation,118 ]119 if opt.spatio_size == 64:120 model += [121 ResnetBlock(122 min(ngf * mult * 2, opt.mc),123 padding_type=padding_type,124 activation=activation,125 norm_layer=norm_layer,126 opt=opt,127 )128 ]129 model += [130 ResnetBlock(131 min(ngf * mult * 2, opt.mc),132 padding_type=padding_type,133 activation=activation,134 norm_layer=norm_layer,135 opt=opt,136 )137 ]138 # model += [nn.Conv2d(min(ngf * mult * 2, opt.mc), min(ngf, opt.mc), 1, 1)]139 if opt.feat_dim > 0:140 model += [nn.Conv2d(min(ngf * mult * 2, opt.mc), opt.feat_dim, 1, 1)]141 self.encoder = nn.Sequential(*model)142 # decode143 model = []144 if opt.feat_dim > 0:145 model += [nn.Conv2d(opt.feat_dim, min(ngf * mult * 2, opt.mc), 1, 1)]146 # model += [nn.Conv2d(min(ngf, opt.mc), min(ngf * mult * 2, opt.mc), 1, 1)]147 o_pad = 0 if k_size == 4 else 1148 mult = 2 ** n_downsampling149 model += [150 ResnetBlock(151 min(ngf * mult, opt.mc),152 padding_type=padding_type,153 activation=activation,154 norm_layer=norm_layer,155 opt=opt,156 )157 ]158 if opt.spatio_size == 32:159 model += [160 nn.ConvTranspose2d(161 
min(ngf * mult, opt.mc),162 min(int(ngf * mult / 2), opt.mc),163 kernel_size=k_size,164 stride=2,165 padding=1,166 output_padding=o_pad,167 ),168 norm_layer(min(int(ngf * mult / 2), opt.mc)),169 activation,170 ]171 if opt.spatio_size == 64:172 model += [173 ResnetBlock(174 min(ngf * mult, opt.mc),175 padding_type=padding_type,176 activation=activation,177 norm_layer=norm_layer,178 opt=opt,179 )180 ]181 for i in range(1, n_downsampling - opt.start_r):182 mult = 2 ** (n_downsampling - i)183 model += [184 ResnetBlock(185 min(ngf * mult, opt.mc),186 padding_type=padding_type,187 activation=activation,188 norm_layer=norm_layer,189 opt=opt,190 )191 ]192 model += [193 ResnetBlock(194 min(ngf * mult, opt.mc),195 padding_type=padding_type,196 activation=activation,197 norm_layer=norm_layer,198 opt=opt,199 )200 ]201 model += [202 nn.ConvTranspose2d(203 min(ngf * mult, opt.mc),204 min(int(ngf * mult / 2), opt.mc),205 kernel_size=k_size,206 stride=2,207 padding=1,208 output_padding=o_pad,209 ),210 norm_layer(min(int(ngf * mult / 2), opt.mc)),211 activation,212 ]213 for i in range(n_downsampling - opt.start_r, n_downsampling):214 mult = 2 ** (n_downsampling - i)215 model += [216 nn.ConvTranspose2d(217 min(ngf * mult, opt.mc),218 min(int(ngf * mult / 2), opt.mc),219 kernel_size=k_size,220 stride=2,221 padding=1,222 output_padding=o_pad,223 ),224 norm_layer(min(int(ngf * mult / 2), opt.mc)),225 activation,226 ]227 if opt.use_segmentation_model:228 model += [nn.ReflectionPad2d(3), nn.Conv2d(min(ngf, opt.mc), output_nc, kernel_size=7, padding=0)]229 else:230 model += [231 nn.ReflectionPad2d(3),232 nn.Conv2d(min(ngf, opt.mc), output_nc, kernel_size=7, padding=0),233 nn.Tanh(),234 ]235 self.decoder = nn.Sequential(*model)236 # def forward(self, input, flow="enc_dec"):237 # if flow == "enc":238 # return self.encoder(input)239 # elif flow == "dec":240 # return self.decoder(input)241 # elif flow == "enc_dec":242 # x = self.encoder(input)243 # x = self.decoder(x)244 # return x245 def 
# NOTE(review): reconstructed from a line-number-mangled scrape. The two defs
# below are methods of a VAE-style generator class whose header (and the end of
# its __init__) lies above this chunk — confirm their enclosing class upstream.

def reparameterize(self, mu, logvar):
    """Standard VAE reparameterization trick: z = mu + eps * sigma, with
    sigma = exp(0.5 * logvar)."""
    std = torch.exp(0.5 * logvar)
    eps = torch.randn_like(std)
    return mu + eps * std

def forward(self, input, flow="enc_dec"):
    """Run the encoder, the decoder, or both.

    When not in eval mode (``self.eval_`` falsy) the encoder output is passed
    through mean/var heads and reparameterized, and the posterior statistics
    are returned alongside the sample for the KL term.
    """
    if flow == "enc":
        h = self.encoder(input)
        if not self.eval_:
            mean = self.mean_layer(h)
            var = self.var_layer(h)
            h = self.reparameterize(mean, var)
            return h, mean, var
        else:
            return h
    elif flow == "dec":
        return self.decoder(input)
    elif flow == "enc_dec":
        z_x = self.encoder(input)
        if not self.eval_:
            mean = self.mean_layer(z_x)
            var = self.var_layer(z_x)
            z_x = self.reparameterize(mean, var)
            x = self.decoder(z_x)
            return z_x, mean, var, x
        return self.decoder(z_x)


# Define a resnet block
class ResnetBlock(nn.Module):
    """Residual block: out = x + conv_block(x).

    ``dilation`` affects only the first conv (and its padding); the second conv
    always uses dilation 1 / padding 1, so spatial size is preserved.
    """

    def __init__(
        self, dim, padding_type, norm_layer, opt, activation=nn.ReLU(True), use_dropout=False, dilation=1
    ):
        super(ResnetBlock, self).__init__()
        self.opt = opt
        self.dilation = dilation
        self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, activation, use_dropout)

    def build_conv_block(self, dim, padding_type, norm_layer, activation, use_dropout):
        """Build conv-norm-act(-dropout)-conv-norm with the requested padding style."""
        conv_block = []
        p = 0
        if padding_type == "reflect":
            conv_block += [nn.ReflectionPad2d(self.dilation)]
        elif padding_type == "replicate":
            conv_block += [nn.ReplicationPad2d(self.dilation)]
        elif padding_type == "zero":
            p = self.dilation
        else:
            raise NotImplementedError("padding [%s] is not implemented" % padding_type)
        conv_block += [
            nn.Conv2d(dim, dim, kernel_size=3, padding=p, dilation=self.dilation),
            norm_layer(dim),
            activation,
        ]
        if use_dropout:
            conv_block += [nn.Dropout(0.5)]
        p = 0
        if padding_type == "reflect":
            conv_block += [nn.ReflectionPad2d(1)]
        elif padding_type == "replicate":
            conv_block += [nn.ReplicationPad2d(1)]
        elif padding_type == "zero":
            p = 1
        else:
            raise NotImplementedError("padding [%s] is not implemented" % padding_type)
        conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, dilation=1), norm_layer(dim)]
        return nn.Sequential(*conv_block)

    def forward(self, x):
        out = x + self.conv_block(x)
        return out


class Encoder(nn.Module):
    """Hourglass feature encoder followed by instance-wise average pooling."""

    def __init__(self, input_nc, output_nc, ngf=32, n_downsampling=4, norm_layer=nn.BatchNorm2d):
        super(Encoder, self).__init__()
        self.output_nc = output_nc
        model = [
            nn.ReflectionPad2d(3),
            nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0),
            norm_layer(ngf),
            nn.ReLU(True),
        ]
        ### downsample
        for i in range(n_downsampling):
            mult = 2 ** i
            model += [
                nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1),
                norm_layer(ngf * mult * 2),
                nn.ReLU(True),
            ]
        ### upsample
        for i in range(n_downsampling):
            mult = 2 ** (n_downsampling - i)
            model += [
                nn.ConvTranspose2d(
                    ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1, output_padding=1
                ),
                norm_layer(int(ngf * mult / 2)),
                nn.ReLU(True),
            ]
        model += [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0), nn.Tanh()]
        self.model = nn.Sequential(*model)

    def forward(self, input, inst):
        outputs = self.model(input)
        # instance-wise average pooling: replace every feature inside an
        # instance-id region with the region's mean feature
        outputs_mean = outputs.clone()
        inst_list = np.unique(inst.cpu().numpy().astype(int))
        for i in inst_list:
            for b in range(input.size()[0]):
                indices = (inst[b : b + 1] == int(i)).nonzero()  # n x 4
                for j in range(self.output_nc):
                    output_ins = outputs[indices[:, 0] + b, indices[:, 1] + j, indices[:, 2], indices[:, 3]]
                    mean_feat = torch.mean(output_ins).expand_as(output_ins)
                    outputs_mean[
                        indices[:, 0] + b, indices[:, 1] + j, indices[:, 2], indices[:, 3]
                    ] = mean_feat
        return outputs_mean


def SN(module, mode=True):
    """Wrap ``module`` with spectral normalization when ``mode`` is truthy;
    otherwise return it unchanged."""
    if mode:
        return torch.nn.utils.spectral_norm(module)
    return module


class NonLocalBlock2D_with_mask_Res(nn.Module):
    """Non-local (attention) block whose affinity matrix is restricted by a
    mask, followed by a 3-block residual refinement of the projected output."""

    def __init__(
        self,
        in_channels,
        inter_channels,
        mode="add",
        re_norm=False,
        temperature=1.0,
        use_self=False,
        cosin=False,
    ):
        super(NonLocalBlock2D_with_mask_Res, self).__init__()
        self.cosin = cosin
        self.renorm = re_norm
        self.in_channels = in_channels
        self.inter_channels = inter_channels
        self.g = nn.Conv2d(
            in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1, stride=1, padding=0
        )
        self.W = nn.Conv2d(
            in_channels=self.inter_channels, out_channels=self.in_channels, kernel_size=1, stride=1, padding=0
        )
        # for pytorch 0.4.0+; W starts at zero so the block initially contributes nothing
        nn.init.constant_(self.W.weight, 0)
        nn.init.constant_(self.W.bias, 0)
        self.theta = nn.Conv2d(
            in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1, stride=1, padding=0
        )
        self.phi = nn.Conv2d(
            in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1, stride=1, padding=0
        )
        self.mode = mode
        self.temperature = temperature
        self.use_self = use_self
        norm_layer = get_norm_layer(norm_type="instance")
        activation = nn.ReLU(True)
        model = []
        for i in range(3):
            model += [
                ResnetBlock(
                    inter_channels,
                    padding_type="reflect",
                    activation=activation,
                    norm_layer=norm_layer,
                    opt=None,
                )
            ]
        self.res_block = nn.Sequential(*model)

    def forward(self, x, mask):  ## The shape of mask is Batch*1*H*W
        batch_size = x.size(0)
        g_x = self.g(x).view(batch_size, self.inter_channels, -1)
        g_x = g_x.permute(0, 2, 1)
        theta_x = self.theta(x).view(batch_size, self.inter_channels, -1)
        theta_x = theta_x.permute(0, 2, 1)
        phi_x = self.phi(x).view(batch_size, self.inter_channels, -1)
        if self.cosin:
            theta_x = F.normalize(theta_x, dim=2)
            phi_x = F.normalize(phi_x, dim=1)
        f = torch.matmul(theta_x, phi_x)
        f /= self.temperature
        f_div_C = F.softmax(f, dim=2)
        # Build the (inverted, binarized) attention mask at feature resolution.
        tmp = 1 - mask
        mask = F.interpolate(mask, (x.size(2), x.size(3)), mode="bilinear")
        mask[mask > 0] = 1.0
        mask = 1 - mask
        tmp = F.interpolate(tmp, (x.size(2), x.size(3)))
        mask *= tmp
        mask_expand = mask.view(batch_size, 1, -1)
        mask_expand = mask_expand.repeat(1, x.size(2) * x.size(3), 1)
        if self.use_self:
            # always allow each position to attend to itself
            mask_expand[:, range(x.size(2) * x.size(3)), range(x.size(2) * x.size(3))] = 1.0
        f_div_C = mask_expand * f_div_C
        if self.renorm:
            f_div_C = F.normalize(f_div_C, p=1, dim=2)
        ###########################
        y = torch.matmul(f_div_C, g_x)
        y = y.permute(0, 2, 1).contiguous()
        y = y.view(batch_size, self.inter_channels, *x.size()[2:])
        W_y = self.W(y)
        W_y = self.res_block(W_y)
        if self.mode == "combine":
            full_mask = mask.repeat(1, self.inter_channels, 1, 1)
            z = full_mask * x + (1 - full_mask) * W_y
            return z
        # NOTE(review): in this chunk no value is returned for modes other than
        # "combine" (implicitly None) — confirm against the full original file.


class Z_xr_Discriminator(nn.Module):
    """Patch discriminator with spectral norm and a CAM-style gap/gmp head."""

    def __init__(self, input_nc, ndf=64, n_layers=5):
        super(Z_xr_Discriminator, self).__init__()
        model = [
            nn.ReflectionPad2d(1),
            nn.utils.spectral_norm(nn.Conv2d(input_nc, ndf, kernel_size=4, stride=2, padding=0, bias=True)),
            nn.LeakyReLU(0.2, True),
        ]
        for i in range(1, n_layers - 2):
            mult = 2 ** (i - 1)
            model += [
                nn.ReflectionPad2d(1),
                nn.utils.spectral_norm(
                    nn.Conv2d(ndf * mult, ndf * mult * 2, kernel_size=4, stride=2, padding=0, bias=True)
                ),
                nn.LeakyReLU(0.2, True),
            ]
        mult = 2 ** (n_layers - 2 - 1)
        model += [
            nn.ReflectionPad2d(1),
            nn.utils.spectral_norm(
                nn.Conv2d(ndf * mult, ndf * mult * 2, kernel_size=4, stride=1, padding=0, bias=True)
            ),
            nn.LeakyReLU(0.2, True),
        ]
        # Class Activation Map
        mult = 2 ** (n_layers - 2)
        self.gap_fc = nn.utils.spectral_norm(nn.Linear(ndf * mult, 1, bias=False))
        self.gmp_fc = nn.utils.spectral_norm(nn.Linear(ndf * mult, 1, bias=False))
        self.conv1x1 = nn.Conv2d(ndf * mult * 2, ndf * mult, kernel_size=1, stride=1, bias=True)
        self.leaky_relu = nn.LeakyReLU(0.2, True)
        self.pad = nn.ReflectionPad2d(1)
        self.conv = nn.utils.spectral_norm(
            nn.Conv2d(ndf * mult, 1, kernel_size=4, stride=1, padding=0, bias=False)
        )
        self.model = nn.Sequential(*model)

    def forward(self, input, need_each_activation=False):
        each_activations = []
        if need_each_activation:
            # run layer by layer so intermediate LeakyReLU outputs can be collected
            x = input
            for i in range(len(self.model)):
                x = self.model[i](x)
                if isinstance(self.model[i], torch.nn.modules.activation.LeakyReLU):
                    each_activations.append(x)
        else:
            x = self.model(input)
        gap = torch.nn.functional.adaptive_avg_pool2d(x, 1)
        gap_weight = list(self.gap_fc.parameters())[0]
        gap = x * gap_weight.unsqueeze(2).unsqueeze(3)
        gmp = torch.nn.functional.adaptive_max_pool2d(x, 1)
        gmp_weight = list(self.gmp_fc.parameters())[0]
        gmp = x * gmp_weight.unsqueeze(2).unsqueeze(3)
        x = torch.cat([gap, gmp], 1)
        x = self.leaky_relu(self.conv1x1(x))
        x = self.pad(x)
        out = self.conv(x)
        if need_each_activation:
            return out, each_activations
        else:
            # NOTE(review): source truncated here in this chunk; presumably `return out`.
            ...
autoencoder.py
Source: autoencoder.py
from __future__ import print_function  # BUGFIX: __future__ imports must be the first statement in the module

from caffe import layers as L, params as P
import sys

sys.path.append("utils")
sys.path.append("utils/autoencoder")
from basis import *  # NOTE(review): presumably provides `caffe`, `conv` and `max_pool` — confirm

# Target image size for ImageData layers; None keeps the input size as-is.
height = None
width = None


def _frozen_bn_param():
    """Param spec freezing all three internal BatchNorm blobs (lr_mult=0)."""
    return [{"lr_mult": 0}, {"lr_mult": 0}, {"lr_mult": 0}]


def _bilinear_upsample(bottom, channels):
    """Fixed (non-learned) 2x bilinear upsampling via a grouped deconvolution."""
    return L.Deconvolution(
        bottom,
        param=dict(lr_mult=0, decay_mult=0),
        convolution_param=dict(
            group=channels, num_output=channels, kernel_size=4, stride=2, pad=1,
            bias_term=False, weight_filler=dict(type="bilinear"),
        ),
    )


def _image_data(split, batch_sz):
    """Shared ImageData input: grayscale images resized to (height, width)."""
    return L.ImageData(
        image_data_param=dict(
            source=split, batch_size=batch_sz, new_height=height, new_width=width, is_color=False
        ),
        ntop=2,
    )


def conv1_autoencoder(split, batch_sz):
    """1-stage conv autoencoder; returns the prototxt string."""
    n = caffe.NetSpec()
    n.data, n.label = _image_data(split, batch_sz)
    n.silence = L.Silence(n.label, ntop=0)
    n.flatdata_i = L.Flatten(n.data)

    n.conv1 = conv(n.data, 5, 5, 64, pad=2)
    n.bn1 = L.BatchNorm(n.conv1, use_global_stats=False, in_place=True, param=_frozen_bn_param())
    n.scale1 = L.Scale(n.bn1, bias_term=True, in_place=True)
    n.relu1 = L.ReLU(n.scale1, relu_param=dict(negative_slope=0.1))
    n.pool1 = max_pool(n.relu1, 2, stride=2)

    n.code = conv(n.pool1, 5, 5, 64, pad=2)

    n.upsample1 = _bilinear_upsample(n.code, 64)
    n.deconv1 = conv(n.upsample1, 5, 5, 1, pad=2)
    n.debn1 = L.BatchNorm(n.deconv1, use_global_stats=False, in_place=True, param=_frozen_bn_param())
    n.descale1 = L.Scale(n.debn1, bias_term=True, in_place=True)
    n.derelu1 = L.ReLU(n.descale1, relu_param=dict(negative_slope=0.1))

    n.flatdata_o = L.Flatten(n.derelu1)
    n.loss_s = L.SigmoidCrossEntropyLoss(n.flatdata_o, n.flatdata_i, loss_weight=1)
    n.loss_e = L.EuclideanLoss(n.flatdata_o, n.flatdata_i, loss_weight=0)
    return str(n.to_proto())


def conv2_autoencoder(split, batch_sz):
    """2-stage conv autoencoder; outermost convs frozen (no_back=True)."""
    n = caffe.NetSpec()
    n.data, n.label = _image_data(split, batch_sz)
    n.silence = L.Silence(n.label, ntop=0)
    n.flatdata_i = L.Flatten(n.data)

    n.conv1 = conv(n.data, 5, 5, 64, pad=2, no_back=True)
    n.bn1 = L.BatchNorm(n.conv1, use_global_stats=False, in_place=True, param=_frozen_bn_param())
    n.scale1 = L.Scale(n.bn1, bias_term=True, in_place=True)
    n.relu1 = L.ReLU(n.scale1, relu_param=dict(negative_slope=0.1))
    n.pool1 = max_pool(n.relu1, 2, stride=2)

    n.conv2 = conv(n.pool1, 5, 5, 128, pad=2)
    n.bn2 = L.BatchNorm(n.conv2, use_global_stats=False, in_place=True, param=_frozen_bn_param())
    n.scale2 = L.Scale(n.bn2, bias_term=True, in_place=True)
    n.relu2 = L.ReLU(n.scale2, relu_param=dict(negative_slope=0.1))
    n.pool2 = max_pool(n.relu2, 2, stride=2)

    n.code = conv(n.pool2, 5, 5, 128, pad=2)

    n.upsample2 = _bilinear_upsample(n.code, 128)
    n.deconv2 = conv(n.upsample2, 5, 5, 64, pad=2)
    n.debn2 = L.BatchNorm(n.deconv2, use_global_stats=False, in_place=True, param=_frozen_bn_param())
    n.descale2 = L.Scale(n.debn2, bias_term=True, in_place=True)
    n.derelu2 = L.ReLU(n.descale2, relu_param=dict(negative_slope=0.1))

    n.upsample1 = _bilinear_upsample(n.derelu2, 64)
    n.deconv1 = conv(n.upsample1, 5, 5, 1, pad=2, no_back=True)
    n.debn1 = L.BatchNorm(n.deconv1, use_global_stats=False, in_place=True, param=_frozen_bn_param())
    n.descale1 = L.Scale(n.debn1, bias_term=True, in_place=True)
    n.derelu1 = L.ReLU(n.descale1, relu_param=dict(negative_slope=0.1))

    n.flatdata_o = L.Flatten(n.derelu1)
    n.loss_s = L.SigmoidCrossEntropyLoss(n.flatdata_o, n.flatdata_i, loss_weight=1)
    n.loss_e = L.EuclideanLoss(n.flatdata_o, n.flatdata_i, loss_weight=0)
    return str(n.to_proto())


def conv3_autoencoder(split, batch_sz):
    """3-stage conv autoencoder (with an extra conv3_5 layer)."""
    n = caffe.NetSpec()
    n.data, n.label = _image_data(split, batch_sz)
    n.silence = L.Silence(n.label, ntop=0)
    n.flatdata_i = L.Flatten(n.data)

    n.conv1 = conv(n.data, 5, 5, 64, pad=2, no_back=True)
    n.bn1 = L.BatchNorm(n.conv1, use_global_stats=False, in_place=True, param=_frozen_bn_param())
    n.scale1 = L.Scale(n.bn1, bias_term=True, in_place=True)
    n.relu1 = L.ReLU(n.scale1, relu_param=dict(negative_slope=0.1))
    n.pool1 = max_pool(n.relu1, 2, stride=2)

    n.conv2 = conv(n.pool1, 5, 5, 128, pad=2, no_back=True)
    n.bn2 = L.BatchNorm(n.conv2, use_global_stats=False, in_place=True, param=_frozen_bn_param())
    n.scale2 = L.Scale(n.bn2, bias_term=True, in_place=True)
    n.relu2 = L.ReLU(n.scale2, relu_param=dict(negative_slope=0.1))
    n.pool2 = max_pool(n.relu2, 2, stride=2)

    n.conv3 = conv(n.pool2, 3, 3, 256, pad=1)
    n.bn3 = L.BatchNorm(n.conv3, use_global_stats=False, in_place=True, param=_frozen_bn_param())
    n.scale3 = L.Scale(n.bn3, bias_term=True, in_place=True)
    n.relu3 = L.ReLU(n.scale3, relu_param=dict(negative_slope=0.1))
    n.conv3_5 = conv(n.relu3, 3, 3, 512, pad=1)
    n.bn3_5 = L.BatchNorm(n.conv3_5, use_global_stats=False, in_place=True, param=_frozen_bn_param())
    n.scale3_5 = L.Scale(n.bn3_5, bias_term=True, in_place=True)
    n.relu3_5 = L.ReLU(n.scale3_5, relu_param=dict(negative_slope=0.1))
    n.pool3_5 = max_pool(n.relu3_5, 2, stride=2)

    n.code = conv(n.pool3_5, 3, 3, 512, pad=1)

    n.upsample3_5 = _bilinear_upsample(n.code, 512)
    n.deconv3_5 = conv(n.upsample3_5, 3, 3, 256, pad=1, no_back=True)
    n.debn3_5 = L.BatchNorm(n.deconv3_5, use_global_stats=False, in_place=True, param=_frozen_bn_param())
    n.descale3_5 = L.Scale(n.debn3_5, bias_term=True, in_place=True)
    n.derelu3_5 = L.ReLU(n.descale3_5, relu_param=dict(negative_slope=0.1))

    n.deconv3 = conv(n.derelu3_5, 5, 5, 128, pad=2, no_back=True)
    n.debn3 = L.BatchNorm(n.deconv3, use_global_stats=False, in_place=True, param=_frozen_bn_param())
    n.descale3 = L.Scale(n.debn3, bias_term=True, in_place=True)
    n.derelu3 = L.ReLU(n.descale3, relu_param=dict(negative_slope=0.1))

    n.upsample2 = _bilinear_upsample(n.derelu3, 128)
    n.deconv2 = conv(n.upsample2, 5, 5, 64, pad=2, no_back=True)
    n.debn2 = L.BatchNorm(n.deconv2, use_global_stats=False, in_place=True, param=_frozen_bn_param())
    n.descale2 = L.Scale(n.debn2, bias_term=True, in_place=True)
    n.derelu2 = L.ReLU(n.descale2, relu_param=dict(negative_slope=0.1))

    n.upsample1 = _bilinear_upsample(n.derelu2, 64)
    n.deconv1 = conv(n.upsample1, 5, 5, 1, pad=2, no_back=True)
    n.debn1 = L.BatchNorm(n.deconv1, use_global_stats=False, in_place=True, param=_frozen_bn_param())
    n.descale1 = L.Scale(n.debn1, bias_term=True, in_place=True)
    n.derelu1 = L.ReLU(n.descale1, relu_param=dict(negative_slope=0.1))

    n.flatdata_o = L.Flatten(n.derelu1)
    n.loss_s = L.SigmoidCrossEntropyLoss(n.flatdata_o, n.flatdata_i, loss_weight=1)
    n.loss_e = L.EuclideanLoss(n.flatdata_o, n.flatdata_i, loss_weight=0)
    return str(n.to_proto())


def conv4_autoencoder(split, batch_sz):
    """4-stage conv autoencoder; only the innermost layers are trainable."""
    n = caffe.NetSpec()
    n.data, n.label = _image_data(split, batch_sz)
    n.silence = L.Silence(n.label, ntop=0)
    n.flatdata_i = L.Flatten(n.data)

    n.conv1 = conv(n.data, 5, 5, 64, pad=2, no_back=True)
    n.bn1 = L.BatchNorm(n.conv1, use_global_stats=False, in_place=True, param=_frozen_bn_param())
    n.scale1 = L.Scale(n.bn1, bias_term=True, in_place=True)
    n.relu1 = L.ReLU(n.scale1, relu_param=dict(negative_slope=0.1))
    n.pool1 = max_pool(n.relu1, 2, stride=2)

    n.conv2 = conv(n.pool1, 5, 5, 128, pad=2, no_back=True)
    n.bn2 = L.BatchNorm(n.conv2, use_global_stats=False, in_place=True, param=_frozen_bn_param())
    n.scale2 = L.Scale(n.bn2, bias_term=True, in_place=True)
    n.relu2 = L.ReLU(n.scale2, relu_param=dict(negative_slope=0.1))
    n.pool2 = max_pool(n.relu2, 2, stride=2)

    n.conv3 = conv(n.pool2, 3, 3, 256, pad=1, no_back=True)
    n.bn3 = L.BatchNorm(n.conv3, use_global_stats=False, in_place=True, param=_frozen_bn_param())
    n.scale3 = L.Scale(n.bn3, bias_term=True, in_place=True)
    n.relu3 = L.ReLU(n.scale3, relu_param=dict(negative_slope=0.1))
    n.conv3_5 = conv(n.relu3, 3, 3, 512, pad=1, no_back=True)
    n.bn3_5 = L.BatchNorm(n.conv3_5, use_global_stats=False, in_place=True, param=_frozen_bn_param())
    n.scale3_5 = L.Scale(n.bn3_5, bias_term=True, in_place=True)
    n.relu3_5 = L.ReLU(n.scale3_5, relu_param=dict(negative_slope=0.1))
    n.pool3_5 = max_pool(n.relu3_5, 2, stride=2)

    n.conv4 = conv(n.pool3_5, 3, 3, 512, pad=1)
    n.bn4 = L.BatchNorm(n.conv4, use_global_stats=False, in_place=True, param=_frozen_bn_param())
    n.scale4 = L.Scale(n.bn4, bias_term=True, in_place=True)
    n.relu4 = L.ReLU(n.scale4, relu_param=dict(negative_slope=0.1))

    n.code = conv(n.relu4, 3, 3, 512, pad=1)

    n.deconv4 = conv(n.code, 3, 3, 512, pad=1)
    n.debn4 = L.BatchNorm(n.deconv4, use_global_stats=False, in_place=True, param=_frozen_bn_param())
    n.descale4 = L.Scale(n.debn4, bias_term=True, in_place=True)
    n.derelu4 = L.ReLU(n.descale4, relu_param=dict(negative_slope=0.1))

    n.upsample3_5 = _bilinear_upsample(n.derelu4, 512)
    n.deconv3_5 = conv(n.upsample3_5, 3, 3, 256, pad=1, no_back=True)
    n.debn3_5 = L.BatchNorm(n.deconv3_5, use_global_stats=False, in_place=True, param=_frozen_bn_param())
    n.descale3_5 = L.Scale(n.debn3_5, bias_term=True, in_place=True)
    n.derelu3_5 = L.ReLU(n.descale3_5, relu_param=dict(negative_slope=0.1))

    n.deconv3 = conv(n.derelu3_5, 5, 5, 128, pad=2, no_back=True)
    n.debn3 = L.BatchNorm(n.deconv3, use_global_stats=False, in_place=True, param=_frozen_bn_param())
    n.descale3 = L.Scale(n.debn3, bias_term=True, in_place=True)
    n.derelu3 = L.ReLU(n.descale3, relu_param=dict(negative_slope=0.1))

    n.upsample2 = _bilinear_upsample(n.derelu3, 128)
    n.deconv2 = conv(n.upsample2, 5, 5, 64, pad=2, no_back=True)
    n.debn2 = L.BatchNorm(n.deconv2, use_global_stats=False, in_place=True, param=_frozen_bn_param())
    n.descale2 = L.Scale(n.debn2, bias_term=True, in_place=True)
    n.derelu2 = L.ReLU(n.descale2, relu_param=dict(negative_slope=0.1))

    n.upsample1 = _bilinear_upsample(n.derelu2, 64)
    n.deconv1 = conv(n.upsample1, 5, 5, 1, pad=2, no_back=True)
    n.debn1 = L.BatchNorm(n.deconv1, use_global_stats=False, in_place=True, param=_frozen_bn_param())
    n.descale1 = L.Scale(n.debn1, bias_term=True, in_place=True)
    n.derelu1 = L.ReLU(n.descale1, relu_param=dict(negative_slope=0.1))

    n.flatdata_o = L.Flatten(n.derelu1)
    n.loss_s = L.SigmoidCrossEntropyLoss(n.flatdata_o, n.flatdata_i, loss_weight=1)
    n.loss_e = L.EuclideanLoss(n.flatdata_o, n.flatdata_i, loss_weight=0)
    return str(n.to_proto())


def vgg(split, batch_sz):
    """VGG-style classification net (fixed 32x100 grayscale input)."""
    n = caffe.NetSpec()
    n.data, n.label = L.ImageData(
        image_data_param=dict(
            shuffle=True, source=split, batch_size=batch_sz, new_height=32, new_width=100, is_color=False
        ),
        ntop=2,
    )
    n.silence = L.Silence(n.label, ntop=0)

    n.conv1 = conv(n.data, 5, 5, 64, pad=2)
    n.bn1 = L.BatchNorm(n.conv1, use_global_stats=False, in_place=True, param=_frozen_bn_param())
    n.scale1 = L.Scale(n.bn1, bias_term=True, in_place=True)
    n.relu1 = L.ReLU(n.scale1)
    n.pool1 = max_pool(n.relu1, 2, stride=2)

    n.conv2 = conv(n.pool1, 5, 5, 128, pad=2)
    n.bn2 = L.BatchNorm(n.conv2, use_global_stats=False, in_place=True, param=_frozen_bn_param())
    n.scale2 = L.Scale(n.bn2, bias_term=True, in_place=True)
    n.relu2 = L.ReLU(n.scale2)
    n.pool2 = max_pool(n.relu2, 2, stride=2)

    n.conv3 = conv(n.pool2, 3, 3, 256, pad=1)
    n.bn3 = L.BatchNorm(n.conv3, use_global_stats=False, in_place=True, param=_frozen_bn_param())
    n.scale3 = L.Scale(n.bn3, bias_term=True, in_place=True)
    n.relu3 = L.ReLU(n.scale3)
    n.conv3_5 = conv(n.relu3, 3, 3, 512, pad=1)
    n.bn3_5 = L.BatchNorm(n.conv3_5, use_global_stats=False, in_place=True, param=_frozen_bn_param())
    n.scale3_5 = L.Scale(n.bn3_5, bias_term=True, in_place=True)
    n.relu3_5 = L.ReLU(n.scale3_5)
    n.pool3_5 = max_pool(n.relu3_5, 2, stride=2)

    n.conv4 = conv(n.pool3_5, 3, 3, 512, pad=1)
    n.bn4 = L.BatchNorm(n.conv4, use_global_stats=False, in_place=True, param=_frozen_bn_param())
    n.scale4 = L.Scale(n.bn4, bias_term=True, in_place=True)
    n.relu4 = L.ReLU(n.scale4)

    n.fc5 = conv(n.relu4, 13, 4, 4096)
    # BUGFIX: was L.BatchNorm(n.fc1, ...) but no layer named fc1 exists
    n.bn5 = L.BatchNorm(n.fc5, use_global_stats=False, in_place=True, param=_frozen_bn_param())
    n.scale5 = L.Scale(n.bn5, bias_term=True, in_place=True)
    n.relu5 = L.ReLU(n.scale5)
    n.drop1 = L.Dropout(n.relu5, in_place=True)

    n.fc6 = conv(n.drop1, 1, 1, 4096)
    # BUGFIX: was L.BatchNorm(n.fc2, ...) but no layer named fc2 exists
    n.bn6 = L.BatchNorm(n.fc6, use_global_stats=False, in_place=True, param=_frozen_bn_param())
    n.scale6 = L.Scale(n.bn6, bias_term=True, in_place=True)
    n.relu6 = L.ReLU(n.scale6)
    n.drop2 = L.Dropout(n.relu6, in_place=True)

    n.fc_class = conv(n.drop2, 1, 1, 88172)
    n.bn7 = L.BatchNorm(n.fc_class, use_global_stats=False, in_place=True, param=_frozen_bn_param())
    n.scale7 = L.Scale(n.bn7, bias_term=True, in_place=True)
    n.relu7 = L.ReLU(n.scale7)
    n.loss = L.SoftmaxWithLoss(n.relu7, n.label, loss_weight=1)
    # Source chunk was truncated here; returning the prototxt for consistency
    # with the sibling builders above.
    return str(n.to_proto())
pMC_mult.py
Source: pMC_mult.py
# This file was created automatically by SWIG 1.3.29.
# Don't modify this file, modify the SWIG interface instead.
# This file is compatible with both classic and new-style classes.
# NOTE(review): reconstructed from a line-number-mangled scrape; the original
# Python-2-only spellings (`raise E, msg`, the removed `new` module) have been
# replaced with equivalents that also parse on Python 3.

import _pMC_mult

try:
    import new
    new_instancemethod = new.instancemethod
except ImportError:
    # Python 3: the `new` module was removed; MethodType is the equivalent.
    import types as _types
    new_instancemethod = _types.MethodType


def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
    """SWIG attribute setter: route writes through __swig_setmethods__."""
    if name == "thisown":
        return self.this.own(value)
    if name == "this":
        if type(value).__name__ == 'PySwigObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name, None)
    if method:
        return method(self, value)
    if (not static) or hasattr(self, name):
        self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)


def _swig_setattr(self, class_type, name, value):
    return _swig_setattr_nondynamic(self, class_type, name, value, 0)


def _swig_getattr(self, class_type, name):
    """SWIG attribute getter: route reads through __swig_getmethods__."""
    if name == "thisown":
        return self.this.own()
    method = class_type.__swig_getmethods__.get(name, None)
    if method:
        return method(self)
    raise AttributeError(name)


def _swig_repr(self):
    try:
        strthis = "proxy of " + self.this.__repr__()
    except Exception:
        strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)


import types
try:
    _object = types.ObjectType
    _newclass = 1
except AttributeError:
    class _object:
        pass
    _newclass = 0
del types


class PySwigIterator(_object):
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, PySwigIterator, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, PySwigIterator, name)

    def __init__(self):
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    __swig_destroy__ = _pMC_mult.delete_PySwigIterator
    __del__ = lambda self: None

    def value(*args): return _pMC_mult.PySwigIterator_value(*args)
    def incr(*args): return _pMC_mult.PySwigIterator_incr(*args)
    def decr(*args): return _pMC_mult.PySwigIterator_decr(*args)
    def distance(*args): return _pMC_mult.PySwigIterator_distance(*args)
    def equal(*args): return _pMC_mult.PySwigIterator_equal(*args)
    def copy(*args): return _pMC_mult.PySwigIterator_copy(*args)
    def next(*args): return _pMC_mult.PySwigIterator_next(*args)
    def previous(*args): return _pMC_mult.PySwigIterator_previous(*args)
    def advance(*args): return _pMC_mult.PySwigIterator_advance(*args)
    def __eq__(*args): return _pMC_mult.PySwigIterator___eq__(*args)
    def __ne__(*args): return _pMC_mult.PySwigIterator___ne__(*args)
    def __iadd__(*args): return _pMC_mult.PySwigIterator___iadd__(*args)
    def __isub__(*args): return _pMC_mult.PySwigIterator___isub__(*args)
    def __add__(*args): return _pMC_mult.PySwigIterator___add__(*args)
    def __sub__(*args): return _pMC_mult.PySwigIterator___sub__(*args)
    def __iter__(self): return self

PySwigIterator_swigregister = _pMC_mult.PySwigIterator_swigregister
PySwigIterator_swigregister(PySwigIterator)


class IntVector(_object):
    """Proxy of C++ std::vector<int>."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, IntVector, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, IntVector, name)
    __repr__ = _swig_repr

    def iterator(*args): return _pMC_mult.IntVector_iterator(*args)
    def __iter__(self): return self.iterator()
    def __nonzero__(*args): return _pMC_mult.IntVector___nonzero__(*args)
    def __len__(*args): return _pMC_mult.IntVector___len__(*args)
    def pop(*args): return _pMC_mult.IntVector_pop(*args)
    def __getslice__(*args): return _pMC_mult.IntVector___getslice__(*args)
    def __setslice__(*args): return _pMC_mult.IntVector___setslice__(*args)
    def __delslice__(*args): return _pMC_mult.IntVector___delslice__(*args)
    def __delitem__(*args): return _pMC_mult.IntVector___delitem__(*args)
    def __getitem__(*args): return _pMC_mult.IntVector___getitem__(*args)
    def __setitem__(*args): return _pMC_mult.IntVector___setitem__(*args)
    def append(*args): return _pMC_mult.IntVector_append(*args)
    def empty(*args): return _pMC_mult.IntVector_empty(*args)
    def size(*args): return _pMC_mult.IntVector_size(*args)
    def clear(*args): return _pMC_mult.IntVector_clear(*args)
    def swap(*args): return _pMC_mult.IntVector_swap(*args)
    def get_allocator(*args): return _pMC_mult.IntVector_get_allocator(*args)
    def begin(*args): return _pMC_mult.IntVector_begin(*args)
    def end(*args): return _pMC_mult.IntVector_end(*args)
    def rbegin(*args): return _pMC_mult.IntVector_rbegin(*args)
    def rend(*args): return _pMC_mult.IntVector_rend(*args)
    def pop_back(*args): return _pMC_mult.IntVector_pop_back(*args)
    def erase(*args): return _pMC_mult.IntVector_erase(*args)

    def __init__(self, *args):
        this = _pMC_mult.new_IntVector(*args)
        try:
            self.this.append(this)
        except Exception:
            self.this = this

    def push_back(*args): return _pMC_mult.IntVector_push_back(*args)
    def front(*args): return _pMC_mult.IntVector_front(*args)
    def back(*args): return _pMC_mult.IntVector_back(*args)
    def assign(*args): return _pMC_mult.IntVector_assign(*args)
    def resize(*args): return _pMC_mult.IntVector_resize(*args)
    def insert(*args): return _pMC_mult.IntVector_insert(*args)
    def reserve(*args): return _pMC_mult.IntVector_reserve(*args)
    def capacity(*args): return _pMC_mult.IntVector_capacity(*args)
    __swig_destroy__ = _pMC_mult.delete_IntVector
    __del__ = lambda self: None

IntVector_swigregister = _pMC_mult.IntVector_swigregister
IntVector_swigregister(IntVector)


class DoubleVector(_object):
    """Proxy of C++ std::vector<double>."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, DoubleVector, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, DoubleVector, name)
    __repr__ = _swig_repr

    def iterator(*args): return _pMC_mult.DoubleVector_iterator(*args)
    def __iter__(self): return self.iterator()
    def __nonzero__(*args): return _pMC_mult.DoubleVector___nonzero__(*args)
    def __len__(*args): return _pMC_mult.DoubleVector___len__(*args)
    def pop(*args): return _pMC_mult.DoubleVector_pop(*args)
    def __getslice__(*args): return _pMC_mult.DoubleVector___getslice__(*args)
    def __setslice__(*args): return _pMC_mult.DoubleVector___setslice__(*args)
    def __delslice__(*args): return _pMC_mult.DoubleVector___delslice__(*args)
    def __delitem__(*args): return _pMC_mult.DoubleVector___delitem__(*args)
    def __getitem__(*args): return _pMC_mult.DoubleVector___getitem__(*args)
    def __setitem__(*args): return _pMC_mult.DoubleVector___setitem__(*args)
    def append(*args): return _pMC_mult.DoubleVector_append(*args)
    def empty(*args): return _pMC_mult.DoubleVector_empty(*args)
    def size(*args): return _pMC_mult.DoubleVector_size(*args)
    def clear(*args): return _pMC_mult.DoubleVector_clear(*args)
    def swap(*args): return _pMC_mult.DoubleVector_swap(*args)
    def get_allocator(*args): return _pMC_mult.DoubleVector_get_allocator(*args)
    def begin(*args): return _pMC_mult.DoubleVector_begin(*args)
    def end(*args): return _pMC_mult.DoubleVector_end(*args)
    def rbegin(*args): return _pMC_mult.DoubleVector_rbegin(*args)
    def rend(*args): return _pMC_mult.DoubleVector_rend(*args)
    def pop_back(*args): return _pMC_mult.DoubleVector_pop_back(*args)
    def erase(*args): return _pMC_mult.DoubleVector_erase(*args)

    def __init__(self, *args):
        this = _pMC_mult.new_DoubleVector(*args)
        try:
            self.this.append(this)
        except Exception:
            self.this = this

    def push_back(*args): return _pMC_mult.DoubleVector_push_back(*args)
    def front(*args): return _pMC_mult.DoubleVector_front(*args)
    def back(*args): return _pMC_mult.DoubleVector_back(*args)
    def assign(*args): return _pMC_mult.DoubleVector_assign(*args)
    def resize(*args): return _pMC_mult.DoubleVector_resize(*args)
    def insert(*args): return _pMC_mult.DoubleVector_insert(*args)
    def reserve(*args): return _pMC_mult.DoubleVector_reserve(*args)
    def capacity(*args): return _pMC_mult.DoubleVector_capacity(*args)
    __swig_destroy__ = _pMC_mult.delete_DoubleVector
    __del__ = lambda self: None

DoubleVector_swigregister = _pMC_mult.DoubleVector_swigregister
DoubleVector_swigregister(DoubleVector)


class FloatVector(_object):
    """Proxy of C++ std::vector<float>."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, FloatVector, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, FloatVector, name)
    __repr__ = _swig_repr

    def iterator(*args): return _pMC_mult.FloatVector_iterator(*args)
    def __iter__(self): return self.iterator()
    def __nonzero__(*args): return _pMC_mult.FloatVector___nonzero__(*args)
    def __len__(*args): return _pMC_mult.FloatVector___len__(*args)
    def pop(*args): return _pMC_mult.FloatVector_pop(*args)
    def __getslice__(*args): return _pMC_mult.FloatVector___getslice__(*args)
    def __setslice__(*args): return _pMC_mult.FloatVector___setslice__(*args)
    def __delslice__(*args): return _pMC_mult.FloatVector___delslice__(*args)
    def __delitem__(*args): return _pMC_mult.FloatVector___delitem__(*args)
    def __getitem__(*args): return _pMC_mult.FloatVector___getitem__(*args)
    def __setitem__(*args): return _pMC_mult.FloatVector___setitem__(*args)
    def append(*args): return _pMC_mult.FloatVector_append(*args)
    def empty(*args): return _pMC_mult.FloatVector_empty(*args)
    def size(*args): return _pMC_mult.FloatVector_size(*args)
    def clear(*args): return _pMC_mult.FloatVector_clear(*args)
    def swap(*args): return _pMC_mult.FloatVector_swap(*args)
    def get_allocator(*args): return _pMC_mult.FloatVector_get_allocator(*args)
    def begin(*args): return _pMC_mult.FloatVector_begin(*args)
    def end(*args): return _pMC_mult.FloatVector_end(*args)
    def rbegin(*args): return _pMC_mult.FloatVector_rbegin(*args)
    def rend(*args): return _pMC_mult.FloatVector_rend(*args)
    def pop_back(*args): return _pMC_mult.FloatVector_pop_back(*args)
    def erase(*args): return _pMC_mult.FloatVector_erase(*args)

    def __init__(self, *args):
        this = _pMC_mult.new_FloatVector(*args)
        try:
            self.this.append(this)
        except Exception:
            self.this = this

    def push_back(*args): return _pMC_mult.FloatVector_push_back(*args)
    def front(*args): return _pMC_mult.FloatVector_front(*args)
    def back(*args): return _pMC_mult.FloatVector_back(*args)
    def assign(*args): return _pMC_mult.FloatVector_assign(*args)
    def resize(*args): return _pMC_mult.FloatVector_resize(*args)
    def insert(*args): return _pMC_mult.FloatVector_insert(*args)
    def reserve(*args): return _pMC_mult.FloatVector_reserve(*args)
    def capacity(*args): return _pMC_mult.FloatVector_capacity(*args)
    __swig_destroy__ = _pMC_mult.delete_FloatVector
    __del__ = lambda self: None

FloatVector_swigregister = _pMC_mult.FloatVector_swigregister
FloatVector_swigregister(FloatVector)


class MC(_object):
    """Proxy of the C++ MC (Monte Carlo pKa) class."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, MC, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, MC, name)
    __repr__ = _swig_repr

    def __init__(self, *args):
        this = _pMC_mult.new_MC(*args)
        try:
            self.this.append(this)
        except Exception:
            self.this = this

    def calc_pKas(*args): return _pMC_mult.MC_calc_pKas(*args)
    def set_MCsteps(*args): return _pMC_mult.MC_set_MCsteps(*args)
    __swig_destroy__ = _pMC_mult.delete_MC
    __del__ = lambda self: None

MC_swigregister = _pMC_mult.MC_swigregister
MC_swigregister(MC)
krylovslow.py
Source:krylovslow.py
...44 prod = fft.irfft(f1*f2, n+1)45 # prod = signal.convolve(p1, p2, method='fft')46 return prod47# define an alias for easy testing48def poly_mult(p1, p2):49 # return poly_mult_slow(p1, p2)50 d1 = p1.shape[0] - 151 d2 = p2.shape[0] - 152 n = d1 + d253 # q1 = np.pad(p1, (0,d2), 'constant')54 # q2 = np.pad(p2, (0,d1), 'constant')55 # assert q1.shape[0] == n+156 # assert q2.shape[0] == n+157 if n >= 128:58 prod = signal.fftconvolve(p1, p2, mode='full')59 else:60 prod = np.convolve(p1, p2)61 # prod = np.convolve(p1, p2)62 # if prod.shape[0] != n+1:63 # print(d1, d2, p1.shape, p2.shape, prod.shape)64 # assert false65 # assert prod.shape[0] == n+166 return prod67def poly_inv(p, n):68 """69 invert p mod x^n70 """71 assert n >= 172 if n == 1:73 return np.array([1 / p[0]])74 # represent p = p_low + x^k p_high, and its inverse q similarly75 d = p.shape[0]76 k = (n+1)//277 # invert the lower order terms78 q_low = poly_inv(p[:min(d,k)], k)79 # print(q_low)80 # since 2k >= n, p q_l + x^k p_l q_h = 1 (mod x^n)81 # so p_l q_h = (1 - p q_l)/x^k (mod x^{n-k})82 r = poly_mult(p, q_low)83 r[0] -= 184 # assert np.all(r[:min(r.shape[0],k)] == 0)85 # but we know p_l^{-1} mod x^{n-k} since we already know it mod x^k86 q_high = poly_mult(-r[k:min(r.shape[0],n)], q_low)87 # q_low = np.pad(q_low, (0,k-q_low.shape[0]), 'constant')88 q = np.concatenate((q_low, q_high))[:n]89 # q = np.trim_zeros(q, 'b')90 return q91def resolvent_bilinear(A, v, u, n):92 """93 Compute [u e_n]^T * (I-Ax)^{-1} * [v e_1]94 (2x2 matrix of rational fractions)95 output: array of shape (2, 2, n), array shape (n)96 (numerator, denominator)97 invariants:98 numerator has degree n-199 denominator degree n100 """101 if n == 1:102 # don't know how write outer product in numpy103 return (np.array([[[ u[0]*v[0] ], [ u[0]*1 ]], [[ 1*v[0] ], [ 1*1 ]]]), np.array([1,-A[0,0]]))104 k = n//2105 # Let M00 = M[0:k, 0:k], M10 = M[k:n, 0:k], M11 = M[k:n,k:n]106 # i.e. 
M = [M00 0 ; M10 M11] (where M = I-Ax)107 # then M^{-1} = [M00^{-1} 0 ; -M11^{-1} M_10^{-1} M_00^{-1}]108 S0, d0 = resolvent_bilinear(A[:k,:k], v[:k], u[:k], k)109 S1, d1 = resolvent_bilinear(A[k:,k:], v[k:], u[k:], n-k)110 # the part corresponding to bottom left corner is111 # -A[k, k-1]x * u_1^T M_11^{-1} e_1 * e_k^T M_00^{-1} v_0112 # or S1[:,1] * S0[1,:]113 L = np.array([[poly_mult(S1[0,1], S0[1,0]), poly_mult(S1[0,1], S0[1,1])], [poly_mult( S1[1,1], S0[1,0] ), poly_mult( S1[1,1], S0[1,1] )]])114 # print(L)115 L = A[k,k-1] * np.pad(L, ((0,0),(0,0),(1,0)), 'constant') # multiply by X116 # TODO: above padding should be able to be optimized away; when we allocate memory properly can store the coefficients directly in the right place117 # print(L)118 # clear denominators119 # S0 = np.array([[ poly_mult(s, d1) for s in r ] for r in S0])120 # S1 = np.array([[ poly_mult(s, d0) for s in r ] for r in S1])121 # print(S0)122 # really need to define poly matrix operations123 # S = np.array([[poly_add(S0[i,j],S1[i,j]) for j in range(2)] for i in range(2)])124 # S = np.array([[poly_add(S[i,j],L[i,j]) for j in range(2)] for i in range(2)])125 # L[0,0] = poly_add(L[0,0], poly_mult(S0[0,0], d1), n)126 # L[0,1] = poly_add(L[0,1], poly_mult(S0[0,1], d1), n)127 # L[0,0] = poly_add(L[0,0], poly_mult(S1[0,0], d0), n)128 # L[1,0] = poly_add(L[1,0], poly_mult(S1[1,0], d0), n)129 L[0,0] += poly_mult(S0[0,0], d1) + poly_mult(S1[0,0], d0)130 L[0,1] += poly_mult(S0[0,1], d1)131 L[1,0] += poly_mult(S1[1,0], d0)132 return (L, poly_mult(d0,d1))133def krylov_mult(A, v, u, m):134 """135 Compute the matrix-vector product Kry(A, v)^T * u136 A: R^{n \times n}, lower triangular and 2-banded137 u: R^n138 v: R^n139 m: output dimension (i.e. 
width of K)140 """141 n = v.shape[0]142 assert A.shape == (n,n)143 R, d = resolvent_bilinear(A,v,u,n)144 ans = poly_mult(R[0,0], poly_inv(d, m))145 return ans[:m]146def Amult(d, subd, v):147 ans = d*v148 ans[1:] += subd*v[:-1]149 return ans150def krylov_mult_slow(A, v, u, m):151 n = v.shape[0]152 assert A.shape == (n,n)153 cols = [v]154 d = np.diagonal(A, 0)155 subd = np.diagonal(A, -1)156 for i in range(1,m):157 cols.append(Amult(d, subd, cols[-1]))158 K = np.stack(cols, axis=1)159 return K.T @ u160def krylov_mult_slow_allocated(A, v, u, m):161 n = v.shape[0]162 assert A.shape == (n,n)163 d = np.diagonal(A, 0)164 subd = np.diagonal(A, -1)165 # Allocate memory at once to K166 K_T = np.empty((m, n))167 K_T[0] = v168 for i in range(1,m):169 K_T[i] = Amult(d, subd, K_T[i-1])170 return K_T @ u171def krylov_construct(A, v, m):172 n = v.shape[0]173 assert A.shape == (n,n)174 d = np.diagonal(A, 0)175 subd = np.diagonal(A, -1)176 K = np.zeros(shape=(m,n))177 K[0,:] = v178 for i in range(1,m):179 K[i,1:] = subd*K[i-1,:-1]180 return K181def krylov_mult_slow_faster(A, v, u, m):182 K = krylov_construct(A, v, m)183 return K @ u
mult_funcs.py
Source:mult_funcs.py
1def gen_NN_mult(df,model='MLPRegressor',max_iter=20,solver='adam',kernel='rbf',verbose=False):2 import pandas as pd3 from sklearn.model_selection import train_test_split4 from sklearn.neural_network import MLPRegressor5 from sklearn.svm import SVR6 from sklearn.linear_model import LinearRegression7 from sklearn.neighbors import KNeighborsRegressor8 route = df['LINEID'].unique()[0]9 direction = df['DIRECTION'].unique()[0]10 max_trip = df['TRIPLENGTH'].max()11 min_trip = df['TRIPLENGTH'].min()12 13 print(df['TRIPLENGTH'].min(),df['TRIPLENGTH'].max())14 15# df['TRIPLENGTH'] = (df['TRIPLENGTH'] - df['TRIPLENGTH'].min())/(df['TRIPLENGTH'].max() - df['TRIPLENGTH'].min())16# df['feels_like'] = (df['feels_like'] - df['feels_like'].min())/(df['feels_like'].max() - df['feels_like'].min())17 18 df_rev1 = pd.get_dummies(df.drop(['LINEID','DIRECTION'],axis=1))19 20# print(route, direction)21 # y is the target22 max_trip = df_rev1['TRIPLENGTH'].max()23 min_trip = df_rev1['TRIPLENGTH'].min()24 y = (df_rev1['TRIPLENGTH'] - min_trip)/(max_trip - min_trip)25 # X is everything else26 X = df_rev1.drop(["TRIPLENGTH"],1)27 28# Normalsie feels_like29 max_feels_like = X['feels_like'].max()30 min_feels_like = X['feels_like'].min()31 X['feels_like'] = (X['feels_like'] - min_feels_like)/(max_feels_like - min_feels_like)32 # Split the dataset into two datasets: 70% training and 30% test33 X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1, test_size=0.3)34# timetabled_values_train = X_train['PLANNEDTIME_ARR'] - X_train['PLANNEDTIME_DEP']35# timetabled_values_test = X_test['PLANNEDTIME_ARR'] - X_test['PLANNEDTIME_DEP']36 37# X_train.drop('PLANNEDTIME_ARR',axis=1,inplace=True)38# X_test.drop('PLANNEDTIME_ARR',axis=1,inplace=True)39# X_train.drop('PLANNEDTIME_DEP',axis=1,inplace=True)40# X_test.drop('PLANNEDTIME_DEP',axis=1,inplace=True)41 42# print("original range is: ",df_rev1.shape[0])43# print("training range (70%):\t rows 0 to", round(X_train.shape[0]))44# print("test 
range (30%): \t rows", round(X_train.shape[0]), "to", round(X_train.shape[0]) + X_test.shape[0])45 46# need to reset the index to allow contatenation with predicted values otherwise not joining on same index...47 X_train.reset_index(drop=True, inplace=True)48 y_train.reset_index(drop=True, inplace=True)49 X_test.reset_index(drop=True, inplace=True)50 y_test.reset_index(drop=True, inplace=True)51 X_train.head(5)52 53 timetabled_values_train = X_train['PLANNEDTIME_ARR'] - X_train['PLANNEDTIME_DEP']54 timetabled_values_test = X_test['PLANNEDTIME_ARR'] - X_test['PLANNEDTIME_DEP']55 X_train.drop('PLANNEDTIME_ARR',axis=1,inplace=True)56 X_test.drop('PLANNEDTIME_ARR',axis=1,inplace=True)57 X_train.drop('PLANNEDTIME_DEP',axis=1,inplace=True)58 X_test.drop('PLANNEDTIME_DEP',axis=1,inplace=True)59 60 if model == 'MLPRegressor':61 output_model = MLPRegressor(max_iter=max_iter,solver=solver,).fit(X_train, y_train)62 elif model == 'SVR':63 output_model = SVR(kernel=kernel).fit(X_train, y_train)64 elif model == 'LinearRegression':65 output_model = LinearRegression().fit(X_train, y_train)66 elif model == 'KNeighborsRegressor':67 output_model = KNeighborsRegressor().fit(X_train,y_train)68 69# test_train_dict[key] = {70# 'X_train':X_train,71# 'X_test':X_test,72# 'y_train':y_train,73# 'y_test':y_test,74# 'timetabled_values_train':timetabled_values_train,75# 'timetabled_values_test':timetabled_values_test76# }77 78 79 output = {80 'route':route,81 'direction':direction,82 'model':output_model,83 'X_train':X_train,84 'X_test':X_test,85 'y_train':y_train,86 'y_test':y_test,87 'timetabled_values_train':timetabled_values_train/max_trip,88 'timetabled_values_test':timetabled_values_test/max_trip,89 'max_trip':max_trip,90 'min_trip':min_trip,91 'max_feels_like':max_feels_like,92 'min_feels_like':min_feels_like93 }94 if verbose == True:95 print('Done: \t%s\t%s'%(route,direction))96 return output97def gen_NN_mult_200(df):98 return gen_NN_mult(df,max_iter=200)99def gen_NN_mult_500(df):100 
return gen_NN_mult(df,max_iter=500)101def gen_NN_mult_1000(df):102 return gen_NN_mult(df,max_iter=1000)103def gen_NN_mult_2000(df):104 return gen_NN_mult(df,max_iter=2000)105def gen_NN_mult_4000(df):106 return gen_NN_mult(df,max_iter=4000)107def gen_NN_mult_6000(df):108 return gen_NN_mult(df,max_iter=6000)109def gen_NN_mult_8000(df):110 return gen_NN_mult(df,max_iter=8000)#111def gen_NN_mult_200_lbfgs(df):112 return gen_NN_mult(df,max_iter=200,solver='lbfgs')113def gen_NN_mult_1000_lbfgs(df):114 return gen_NN_mult(df,max_iter=1000,solver='lbfgs')115def gen_NN_mult_2000_lbfgs(df):116 return gen_NN_mult(df,max_iter=1000,solver='lbfgs')117def gen_SVR_mult_linear(df):118 return gen_NN_mult(df,model='SVR',kernel='linear')119def gen_SVR_mult(df):120 return gen_NN_mult(df,model='SVR',verbose=True)121def gen_LR_mult(df):122 return gen_NN_mult(df,model='LinearRegression')123def gen_KNR_mult(df):...
mult2_mix.py
Source:mult2_mix.py
1# ##### BEGIN GPL LICENSE BLOCK #####2#3# This program is free software; you can redistribute it and/or4# modify it under the terms of the GNU General Public License5# as published by the Free Software Foundation; either version 26# of the License, or (at your option) any later version.7#8# This program is distributed in the hope that it will be useful,9# but WITHOUT ANY WARRANTY; without even the implied warranty of10# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the11# GNU General Public License for more details.12#13# You should have received a copy of the GNU General Public License14# along with this program; if not, write to the Free Software Foundation,15# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.16#17# ##### END GPL LICENSE BLOCK #####18# Copyright (C) 2015: SCS Software19import bpy20from io_scs_tools.consts import Material as _MAT_consts21MULT2_MIX_G = _MAT_consts.node_group_prefix + "Mult2MixGroup"22_SEPARATE_MULT_NODE = "SeparateMult"23_MULT_GREEN_SCALE_NODE = "MultGScale"24_MULT_GREEN_MIX_NODE = "MultGMix"25_MULT_BASE_MULT_NODE = "MultBaseMult"26_ALPHA_MIX_NODE = "AlphaMix"27def get_node_group():28 """Gets node group for calcualtion of environment addition color.29 :return: node group which calculates environment addition color30 :rtype: bpy.types.NodeGroup31 """32 if MULT2_MIX_G not in bpy.data.node_groups:33 __create_node_group__()34 return bpy.data.node_groups[MULT2_MIX_G]35def __create_node_group__():36 """Creates mult2 mix group.37 Inputs: Base Tex Color, Base Tex Alpha, Mult Tex Color, Mult Tex Alpha38 Outputs: Mix Color, Mix Alpha39 """40 start_pos_x = 041 start_pos_y = 042 pos_x_shift = 18543 mult2_mix_g = bpy.data.node_groups.new(type="ShaderNodeTree", name=MULT2_MIX_G)44 # inputs defining45 mult2_mix_g.inputs.new("NodeSocketFloat", "Base Alpha")46 mult2_mix_g.inputs.new("NodeSocketColor", "Base Color")47 mult2_mix_g.inputs.new("NodeSocketFloat", "Mult Alpha")48 mult2_mix_g.inputs.new("NodeSocketColor", 
"Mult Color")49 input_n = mult2_mix_g.nodes.new("NodeGroupInput")50 input_n.location = (start_pos_x - pos_x_shift, start_pos_y)51 # outputs defining52 mult2_mix_g.outputs.new("NodeSocketFloat", "Mix Alpha")53 mult2_mix_g.outputs.new("NodeSocketColor", "Mix Color")54 output_n = mult2_mix_g.nodes.new("NodeGroupOutput")55 output_n.location = (start_pos_x + pos_x_shift * 6, start_pos_y)56 # nodes creation57 separate_mult_n = mult2_mix_g.nodes.new("ShaderNodeSeparateRGB")58 separate_mult_n.name = _SEPARATE_MULT_NODE59 separate_mult_n.label = _SEPARATE_MULT_NODE60 separate_mult_n.location = (start_pos_x + pos_x_shift, start_pos_y)61 mult_green_scale_n = mult2_mix_g.nodes.new("ShaderNodeMath")62 mult_green_scale_n.name = _MULT_GREEN_SCALE_NODE63 mult_green_scale_n.label = _MULT_GREEN_SCALE_NODE64 mult_green_scale_n.location = (start_pos_x + pos_x_shift * 2, start_pos_y)65 mult_green_scale_n.operation = "MULTIPLY"66 mult_green_scale_n.inputs[1].default_value = 2.067 mult_green_mix_n = mult2_mix_g.nodes.new("ShaderNodeMixRGB")68 mult_green_mix_n.name = _MULT_GREEN_MIX_NODE69 mult_green_mix_n.label = _MULT_GREEN_MIX_NODE70 mult_green_mix_n.location = (start_pos_x + pos_x_shift * 3, start_pos_y + 200)71 mult_green_mix_n.blend_type = "MIX"72 mult_green_mix_n.inputs["Color2"].default_value = (1.0,) * 473 mult_base_mult_n = mult2_mix_g.nodes.new("ShaderNodeMixRGB")74 mult_base_mult_n.name = _MULT_BASE_MULT_NODE75 mult_base_mult_n.label = _MULT_BASE_MULT_NODE76 mult_base_mult_n.location = (start_pos_x + pos_x_shift * 4, start_pos_y + 400)77 mult_base_mult_n.blend_type = "MULTIPLY"78 mult_base_mult_n.inputs["Fac"].default_value = 1.079 alpha_mix_n = mult2_mix_g.nodes.new("ShaderNodeMixRGB")80 alpha_mix_n.name = _ALPHA_MIX_NODE81 alpha_mix_n.label = _ALPHA_MIX_NODE82 alpha_mix_n.location = (start_pos_x + pos_x_shift, start_pos_y - 200)83 # links creation84 mult2_mix_g.links.new(separate_mult_n.inputs["Image"], input_n.outputs["Mult Color"])85 
mult2_mix_g.links.new(mult_green_scale_n.inputs[0], separate_mult_n.outputs["G"])86 mult2_mix_g.links.new(mult_green_mix_n.inputs["Fac"], input_n.outputs["Base Alpha"])87 mult2_mix_g.links.new(mult_green_mix_n.inputs["Color1"], mult_green_scale_n.outputs["Value"])88 mult2_mix_g.links.new(mult_base_mult_n.inputs["Color1"], input_n.outputs["Base Color"])89 mult2_mix_g.links.new(mult_base_mult_n.inputs["Color2"], mult_green_mix_n.outputs["Color"])90 mult2_mix_g.links.new(alpha_mix_n.inputs["Fac"], input_n.outputs["Base Alpha"])91 mult2_mix_g.links.new(alpha_mix_n.inputs["Color1"], input_n.outputs["Mult Alpha"])92 mult2_mix_g.links.new(alpha_mix_n.inputs["Color2"], input_n.outputs["Base Alpha"])93 mult2_mix_g.links.new(output_n.inputs["Mix Color"], mult_base_mult_n.outputs["Color"])...
mvm_make_level_sounds.py
Source:mvm_make_level_sounds.py
1import os, sys, string, re2def get_soundlevel( token ):3 if token.find( "SNDLVL_NONE" ) > -1:4 return 0.0 5 if token.find( "SNDLVL_TALKING" ) > -1:6 return 60.07 if token.find( "SNDLVL_STATIC" ) > -1:8 return 66.09 if token.find( "SNDLVL_NORM" ) > -1:10 return 75.011 if token.find( "SNDLVL_GUNFIRE" ) > -1:12 return 140.013# print token14 if token.find("SNDLVL_") > -1:15 token = token[7:]16# print token 17 if token.find("dB") > -1:18 token = token[:-2]19 if token.find("db") > -1:20 token = token[:-2]21 if token.find("Db") > -1:22 token = token[:-2]23 if token.find("DB") > -1:24 token = token[:-2]25 print token 26 return string.atof( token )27 28def mult_values( s, mult ):29 if s.find("VOL_NORM") > -1:30 return "\"%f\"" % (mult)31 32 value_split = s.split(",")33 value_min = string.atof(value_split[0])34 if len(value_split) > 1:35 value_max = string.atof(value_split[1])36 return "\"%f, %f\"" % (value_min * mult, value_max * mult)37 return "\"%f\"" % (value_min * mult)38def mult_line( line, token, mult ):39 line_split = line.split( )40 if len(line_split) > 1:41 match = line_split[0].find( token )42 if match > -1:43 if token.find( "soundlevel" ) > -1:44 mult_result = get_soundlevel( line_split[1][1:-1] )45 file_output.write( "\t\"soundlevel\"\t\"SNDLVL_%idB\"\n" % ( mult_result * mult ) )46 return 147 else:48 mult_result = mult_values( line_split[1][1:-1], mult )49 file_output.write( "\t\"%s\"\t%s\n" % ( token, mult_result ) )50 return 151 return 052file_output = open("mvm_level_sounds.txt", "w" )53file_output.write("// THIS FILE IS AUTOMATICALLY GENERATED VIA mvm_make_level_sounds.py!!!\n// DO NOT EDIT BY HAND!\n\n\n\n")54default_volume_mult = 0.755file_input = open("game_sounds_weapons.txt")56file_lines = file_input.readlines()57for line in file_lines:58 matched = mult_line( line, "volume", default_volume_mult )59# if matched == 0:60# matched = mult_line( line, "soundlevel", 0.9 ) 61 if matched == 0: 62 file_output.write( line )63file_input.close() 64file_input = 
open("game_sounds_player.txt")65file_lines = file_input.readlines()66for line in file_lines:67 matched = mult_line( line, "volume", default_volume_mult )68# if matched == 0:69# matched = mult_line( line, "soundlevel", 0.9 ) 70 if matched == 0: 71 file_output.write( line )72file_input.close() 73file_input = open("game_sounds_physics.txt")74file_lines = file_input.readlines()75for line in file_lines:76 matched = mult_line( line, "volume", default_volume_mult )77# if matched == 0:78# matched = mult_line( line, "soundlevel", 0.9 ) 79 if matched == 0: 80 file_output.write( line )81file_input.close() 82file_input = open("game_sounds_footsteps.txt")83file_lines = file_input.readlines()84for line in file_lines:85 matched = mult_line( line, "volume", default_volume_mult * 0.5)86# if matched == 0:87# matched = mult_line( line, "soundlevel", 0.9 ) 88 if matched == 0: 89 file_output.write( line )90file_input.close() 91file_input = open("game_sounds.txt")92file_lines = file_input.readlines()93for line in file_lines:94 matched = mult_line( line, "volume", default_volume_mult )95# if matched == 0:96# matched = mult_line( line, "soundlevel", 0.9 ) 97 if matched == 0: 98 file_output.write( line )99file_input.close() 100# file_input = open("game_sounds_vo.txt")101# file_lines = file_input.readlines()102# for line in file_lines:103# matched = mult_line( line, "volume", default_volume_mult )104# # if matched == 0:105# # matched = mult_line( line, "soundlevel", 0.9 ) 106# if matched == 0: 107# file_output.write( line )108# file_input.close() 109# file_input = open("game_sounds_vo_handmade.txt")110# file_lines = file_input.readlines()111# for line in file_lines:112# matched = mult_line( line, "volume", default_volume_mult )113# # if matched == 0:114# # matched = mult_line( line, "soundlevel", 0.9 ) 115# if matched == 0: 116# file_output.write( line )117# file_input.close() 118file_output.close() ...
Using AI Code Generation
1const { chromium } = require('playwright');2const { mult } = require('playwright/lib/utils/rect');3(async () => {4 const browser = await chromium.launch();5 const context = await browser.newContext();6 const page = await context.newPage();7 const rect = await page.evaluate(() => {8 const div = document.createElement('div');9 div.style.width = '100px';10 div.style.height = '100px';11 div.style.position = 'absolute';12 div.style.top = '100px';13 div.style.left = '100px';14 document.body.appendChild(div);15 return div.getBoundingClientRect();16 });17 console.log(mult(rect, 2));18 await browser.close();19})();20const { chromium } = require('playwright');21const { mult } = require('playwright/lib/utils/rect');22(async () => {23 const browser = await chromium.launch();24 const context = await browser.newContext();25 const page = await context.newPage();26 const rect = await page.evaluate(() => {27 const div = document.createElement('div');28 div.style.width = '100px';29 div.style.height = '100px';30 div.style.position = 'absolute';31 div.style.top = '100px';32 div.style.left = '100px';33 document.body.appendChild(div);34 return div.getBoundingClientRect();35 });36 console.log(mult(rect, 2, { x: 100, y: 100 }));37 await browser.close();38})();39const { chromium } = require('playwright');40const { mult } = require('playwright/lib/utils/rect');41(async () => {42 const browser = await chromium.launch();43 const context = await browser.newContext();44 const page = await context.newPage();45 const rect = await page.evaluate(() => {
Using AI Code Generation
1const { InternalAPI } = require('playwright/lib/server/frames');2InternalAPI.mult = (a, b) => a * b;3const { InternalAPI } = require('playwright/lib/server/frames');4InternalAPI.mult = (a, b) => a * b;5const { InternalAPI } = require('playwright/lib/server/frames');6InternalAPI.mult = (a, b) => a * b;7const { InternalAPI } = require('playwright/lib/server/frames');8InternalAPI.mult = (a, b) => a * b;9const { InternalAPI } = require('playwright/lib/server/frames');10InternalAPI.mult = (a, b) => a * b;11const { InternalAPI } = require('playwright/lib/server/frames');12InternalAPI.mult = (a, b) => a * b;13const { InternalAPI } = require('playwright/lib/server/frames');14InternalAPI.mult = (a, b) => a * b;15const { InternalAPI } = require('playwright/lib/server/frames');16InternalAPI.mult = (a, b) => a * b;17const { InternalAPI } = require('playwright/lib/server/frames');18InternalAPI.mult = (a, b) => a * b;19const { InternalAPI } = require('playwright/lib/server/frames');20InternalAPI.mult = (a, b) => a * b;21const { InternalAPI } = require('playwright/lib/server/frames');22InternalAPI.mult = (a, b) => a * b;23const { InternalAPI } = require('playwright/lib/server/frames');24InternalAPI.mult = (a, b) => a * b;25const { InternalAPI }
Using AI Code Generation
1const { mult } = require('@playwright/test/lib/server/frames');2const { chromium } = require('playwright');3const { expect } = require('chai');4(async () => {5 const browser = await chromium.launch();6 const context = await browser.newContext();7 const page = await context.newPage();8 const [response] = await Promise.all([9 page.click('text="Get started"'),10 ]);11 await browser.close();12})();
LambdaTest’s Playwright tutorial will give you a broader idea about the Playwright automation framework, its unique features, and use cases with examples to exceed your understanding of Playwright testing. This tutorial will give A to Z guidance, from installing the Playwright framework to some best practices and advanced concepts.
Get 100 minutes of automation test minutes FREE!!