second

This commit is contained in:
parent ee583fabb0
commit 374b550a08

@@ -0,0 +1,104 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
@@ -0,0 +1,154 @@
date,death,recover,confirmed
5/1/20,68141,164015,1115946
5/2/20,69872,175382,1143296
5/3/20,71062,180152,1167593
5/4/20,72442,187180,1191678
5/5/20,74684,189791,1216209
5/6/20,76998,189910,1240769
5/7/20,78927,195036,1268180
5/8/20,80690,198993,1295019
5/9/20,82158,212534,1320155
5/10/20,83136,216169,1339022
5/11/20,84178,232733,1358293
5/12/20,85777,230287,1381241
5/13/20,87504,243430,1401649
5/14/20,89300,246414,1428467
5/15/20,90968,250747,1453214
5/16/20,92166,268376,1477373
5/17/20,92987,272265,1495736
5/18/20,94214,283178,1518126
5/19/20,95670,289392,1539133
5/20/20,97171,294312,1561830
5/21/20,98356,298418,1587596
5/22/20,99573,350135,1611253
5/23/20,100663,361239,1632364
5/24/20,101302,366736,1652431
5/25/20,101894,379157,1671104
5/26/20,102571,384902,1690754
5/27/20,104034,391508,1709303
5/28/20,105135,399991,1731625
5/29/20,106268,406446,1756098
5/30/20,107222,416461,1779731
5/31/20,107837,444758,1798718
6/1/20,108613,458231,1816154
6/2/20,109594,463868,1837656
6/3/20,110604,479258,1857511
6/4/20,111617,485002,1879150
6/5/20,112508,491706,1904550
6/6/20,113150,500849,1925710
6/7/20,113609,506367,1943626
6/8/20,114116,518522,1961263
6/9/20,115029,524855,1979647
6/10/20,115905,533504,2000757
6/11/20,116735,540292,2023890
6/12/20,117564,547386,2048756
6/13/20,118299,556606,2073964
6/14/20,118643,561816,2092912
6/15/20,119039,576334,2112731
6/16/20,119853,583503,2136401
6/17/20,120592,592191,2163465
6/18/20,121281,599115,2191991
6/19/20,121910,606715,2223553
6/20/20,122469,617460,2255823
6/21/20,122786,622133,2280971
6/22/20,123193,640198,2313123
6/23/20,123941,647548,2350198
6/24/20,124678,656161,2386074
6/25/20,125215,663562,2426391
6/26/20,125844,670809,2472385
6/27/20,126347,679308,2513731
6/28/20,126665,685164,2554461
6/29/20,127050,705203,2595744
6/30/20,127603,720631,2642174
7/1/20,128307,729994,2693993
7/2/20,129020,781970,2750622
7/3/20,129690,790404,2801983
7/4/20,130005,894325,2847664
7/5/20,130331,906763,2898432
7/6/20,130707,924148,2941517
7/7/20,131869,936476,3002171
7/8/20,132694,953462,3062290
7/9/20,133714,969111,3124786
7/10/20,134531,983185,3192841
7/11/20,135259,995576,3252874
7/12/20,135731,1006326,3311312
7/13/20,136176,1031939,3370208
7/14/20,137104,1049098,3438244
7/15/20,138079,1075882,3506364
7/16/20,139035,1090645,3582184
7/17/20,139959,1107204,3654445
7/18/20,140816,1122720,3716980
7/19/20,141297,1131121,3777456
7/20/20,141860,1160087,3839546
7/21/20,142951,1182018,3904066
7/22/20,144175,1210849,3974630
7/23/20,145245,1233269,4043070
7/24/20,146352,1261624,4116393
7/25/20,147282,1279414,4181308
7/26/20,147813,1297863,4236083
7/27/20,148939,1325804,4292934
7/28/20,150269,1355363,4359391
7/29/20,151683,1389425,4431244
7/30/20,152914,1414155,4498701
7/31/20,154151,1438160,4567420
8/1/20,155244,1461885,4623604
8/2/20,155681,1468689,4669149
8/3/20,156262,1513446,4714678
8/4/20,157544,1528979,4773479
8/5/20,158982,1577851,4827936
8/6/20,160226,1598624,4887293
8/7/20,161463,1623870,4946590
8/8/20,162541,1643118,5000709
8/9/20,163065,1656864,5046463
8/10/20,163700,1670755,5094087
8/11/20,164720,1714960,5142088
8/12/20,166219,1753760,5198137
8/13/20,167286,1774648,5249451
8/14/20,168622,1796326,5314791
8/15/20,169622,1818527,5361712
8/16/20,170239,1833067,5400904
8/17/20,170732,1865580,5437580
8/18/20,171958,1898159,5482614
8/19/20,173297,1925049,5529973
8/20/20,174400,1947035,5574013
8/21/20,175491,1965056,5622842
8/22/20,176400,1985484,5665887
8/23/20,176945,1997761,5700119
8/24/20,177418,2020774,5736641
8/25/20,178654,2053699,5777001
8/26/20,179820,2084465,5822167
8/27/20,180936,2101326,5867547
8/28/20,181913,2118367,5914395
8/29/20,182814,2140614,5957126
8/30/20,183268,2153939,5991507
8/31/20,183810,2184825,6026895
9/1/20,184838,2202663,6068759
9/2/20,185917,2231757,6109773
9/3/20,186954,2266957,6153983
9/4/20,187912,2283454,6204376
9/5/20,188674,2302187,6247464
9/6/20,189144,2315995,6278633
9/7/20,189440,2333551,6302200
9/8/20,189892,2359111,6329593
9/9/20,191052,2387479,6363650
9/10/20,191968,2403511,6399723
9/11/20,193142,2417878,6447501
9/12/20,193844,2434658,6488563
9/13/20,194279,2451406,6522914
9/14/20,194710,2474570,6557342
9/15/20,195910,2495127,6596849
9/16/20,196866,2525573,6635867
9/17/20,197727,2540334,6681004
9/18/20,198623,2556465,6730288
9/19/20,199355,2577446,6772447
9/20/20,199620,2590671,6810862
9/21/20,200047,2615949,6862834
9/22/20,201079,2646959,6902696
9/23/20,202135,2670256,6941758
9/24/20,203032,2710183,6988869
9/25/20,203976,2727335,7037151
9/26/20,204744,2750459,7081803
9/27/20,205059,2766280,7119311
9/28/20,205417,2794608,7152546
9/29/20,206251,2813305,7195994
9/30/20,207209,2840688,7235428
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,378 @@
# classifying_names_with_a_character-level_RNN

## papers

```shell
The Unreasonable Effectiveness of Recurrent Neural Networks
https://karpathy.github.io/2015/05/21/rnn-effectiveness/

Understanding LSTM Networks
https://colah.github.io/posts/2015-08-Understanding-LSTMs/
```

## dataset

https://download.pytorch.org/tutorial/data.zip

```shell
unzip data.zip
```
Included in the ``data/names`` directory are 18 text files named as
"[Language].txt". Each file contains a bunch of names, one name per
line, mostly romanized (but we still need to convert from Unicode to
ASCII).
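
The Unicode-to-ASCII step is the standard ``unicodedata`` trick; a minimal sketch, assuming the 57-character ``all_letters`` alphabet that appears in the training config below:

```python
# Minimal sketch of the Unicode -> ASCII conversion, assuming the
# 57-character all_letters alphabet shown in the training config below.
import string
import unicodedata

all_letters = string.ascii_letters + " .,;'"  # 52 letters + " .,;'" = 57

def unicode_to_ascii(s):
    # NFD decomposition splits an accented character into a base letter
    # plus combining marks (category 'Mn'); dropping the marks and any
    # character outside all_letters romanizes the name.
    return ''.join(
        c for c in unicodedata.normalize('NFD', s)
        if unicodedata.category(c) != 'Mn' and c in all_letters
    )

print(unicode_to_ascii('Ślusàrski'))  # Slusarski
```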

We'll end up with a dictionary of lists of names per language,
``{language: [names ...]}``. The generic variables "category" and "line"
(for language and name in our case) are used for later extensibility.
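
A hedged sketch of how that dictionary can be built (variable names here are illustrative, not necessarily what the repo uses):

```python
# Illustrative sketch of building {language: [names ...]}; reuses
# unicode_to_ascii from the sketch above, other names are assumptions.
import glob
import os

category_lines = {}   # {language: [names ...]}
all_categories = []   # the 18 languages

for path in sorted(glob.glob(os.path.join('data', 'names', '*.txt'))):
    category = os.path.splitext(os.path.basename(path))[0]  # "[Language]"
    all_categories.append(category)
    with open(path, encoding='utf-8') as f:
        category_lines[category] = [unicode_to_ascii(l.strip()) for l in f]

print(len(all_categories))  # 18
```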


## how to run

```shell
bash run.sh
```
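
``run.sh`` drives the Python training entry point. Under the hood, each name reaches the model one character at a time as a one-hot vector over the 57-letter alphabet (``n_letters`` in the config below); a rough sketch of that encoding, reusing ``all_letters`` from above:

```python
# Hedged sketch of the per-character one-hot encoding the RNN consumes;
# the <line_length x 1 x n_letters> shape follows n_letters=57 below.
import torch

def line_to_tensor(line):
    # One time step per character, batch size 1.
    tensor = torch.zeros(len(line), 1, len(all_letters))
    for i, ch in enumerate(line):
        tensor[i][0][all_letters.index(ch)] = 1.0
    return tensor

print(line_to_tensor('Jones').size())  # torch.Size([5, 1, 57])
```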

## output

confusion
![Alt](./output/confusion.png)
epoch acc
![Alt](./output/epoch_acc.jpg)
epoch loss
![Alt](./output/epoch_loss.jpg)
step acc
![Alt](./output/step_acc.jpg)
step loss
![Alt](./output/step_loss.jpg)
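
The log below starts by printing the model: a single-cell character RNN whose two ``Linear`` layers both read the concatenation of the current one-hot input and the previous hidden state (57 + 128 = 185 features). A sketch of a module matching that printout (reconstructed from the printed shapes, not copied from the repo's source):

```python
# Sketch of a module matching the RNN(...) printout below; layer names
# and shapes come from the log (185 = n_letters 57 + n_hidden 128,
# 18 output categories), the rest is assumed.
import torch
import torch.nn as nn

class RNN(nn.Module):
    def __init__(self, n_letters=57, n_hidden=128, n_categories=18):
        super().__init__()
        self.n_hidden = n_hidden
        self.input_to_hidden = nn.Linear(n_letters + n_hidden, n_hidden)
        self.input_to_output = nn.Linear(n_letters + n_hidden, n_categories)
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, letter, hidden):
        combined = torch.cat((letter, hidden), dim=1)   # <1 x 185>
        hidden = self.input_to_hidden(combined)         # next hidden state
        output = self.softmax(self.input_to_output(combined))  # log-probs
        return output, hidden

    def init_hidden(self):
        return torch.zeros(1, self.n_hidden)
```

Per the config, training unrolls this cell over ``line_to_tensor(name)``, scores the last output with an NLL loss, and updates with SGD (learn rate 0.005, momentum 0.9); the per-name scores in the prediction blocks at the end of the log are the resulting log-probabilities.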

output
```shell
RNN(
  (input_to_hidden): Linear(in_features=185, out_features=128, bias=True)
  (input_to_output): Linear(in_features=185, out_features=18, bias=True)
  (softmax): LogSoftmax()
)
config:
early_stop_epoch : True
print_every : 5
num_workers : 4
train_load_check_point_file : True
device : cpu
epoch_only : True
epochs : 100
early_stop_step_limit : 100
data_path : ./data/names
optimizer : SGD
steps : 100000
eval_epoch_steps : 10
train_epoch_steps : 10
early_stop_step : True
max_epoch_stop : True
n_hidden : 128
loss : NLL
all_letters : abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ .,;'
dataset : names
early_stop_epoch_limit : 10
learn_rate : 0.005
max_step_stop : True
n_letters : 57
batch_size : 1000
momentum : 0.9
[E:0/100] [S:5/100000] [Train Loss:2.570616 Acc:0.252000 5/10 (50%)] [Val Loss:2.511683 Acc:0.267000 2670/10000 (27%)] [Best Epoch:0 Loss:2.511683 Acc:0.267000] [Best Step:5 Loss:2.511683 Acc:0.267000] Step status
[E:0/100] [S:10/100000] [Train Loss:2.207740 Acc:0.324000 10/10 (100%)] [Val Loss:2.149015 Acc:0.363700 3637/10000 (36%)] [Best Epoch:0 Loss:2.149015 Acc:0.363700] [Best Step:10 Loss:2.149015 Acc:0.363700] Step status
[E:0/100] [S:10/100000] [Train Loss:2.543881 Acc:0.245300] [Val Loss:2.156177 Acc:0.352700 3527/10000 (35%)] [Best Epoch:0 Loss:2.149015 Acc:0.363700] [Best Step:10 Loss:2.149015 Acc:0.363700] [20.09s 20.1s] Epoch status
[E:1/100] [S:15/100000] [Train Loss:2.027740 Acc:0.356000 5/10 (50%)] [Val Loss:1.972278 Acc:0.393200 3932/10000 (39%)] [Best Epoch:1 Loss:1.972278 Acc:0.393200] [Best Step:15 Loss:1.972278 Acc:0.393200] Step status
[E:1/100] [S:20/100000] [Train Loss:1.919879 Acc:0.383000 10/10 (100%)] [Val Loss:1.903391 Acc:0.394900 3949/10000 (39%)] [Best Epoch:1 Loss:1.903391 Acc:0.394900] [Best Step:20 Loss:1.903391 Acc:0.394900] Step status
[E:1/100] [S:20/100000] [Train Loss:2.012543 Acc:0.376500] [Val Loss:1.908830 Acc:0.393500 3935/10000 (39%)] [Best Epoch:1 Loss:1.903391 Acc:0.394900] [Best Step:20 Loss:1.903391 Acc:0.394900] [20.33s 40.4s] Epoch status
[E:2/100] [S:25/100000] [Train Loss:1.767248 Acc:0.437000 5/10 (50%)] [Val Loss:1.778559 Acc:0.439400 4394/10000 (44%)] [Best Epoch:2 Loss:1.778559 Acc:0.439400] [Best Step:25 Loss:1.778559 Acc:0.439400] Step status
[E:2/100] [S:30/100000] [Train Loss:1.676505 Acc:0.506000 10/10 (100%)] [Val Loss:1.618007 Acc:0.490700 4907/10000 (49%)] [Best Epoch:2 Loss:1.618007 Acc:0.490700] [Best Step:30 Loss:1.618007 Acc:0.490700] Step status
[E:2/100] [S:30/100000] [Train Loss:1.749471 Acc:0.455400] [Val Loss:1.652715 Acc:0.476300 4763/10000 (48%)] [Best Epoch:2 Loss:1.618007 Acc:0.490700] [Best Step:30 Loss:1.618007 Acc:0.490700] [19.68s 60.1s] Epoch status
[E:3/100] [S:35/100000] [Train Loss:1.642488 Acc:0.491000 5/10 (50%)] [Val Loss:1.594933 Acc:0.507200 5072/10000 (51%)] [Best Epoch:3 Loss:1.594933 Acc:0.507200] [Best Step:35 Loss:1.594933 Acc:0.507200] Step status
[E:3/100] [S:40/100000] [Train Loss:1.563026 Acc:0.508000 10/10 (100%)] [Val Loss:1.536687 Acc:0.515200 5152/10000 (52%)] [Best Epoch:3 Loss:1.536687 Acc:0.515200] [Best Step:40 Loss:1.536687 Acc:0.515200] Step status
[E:3/100] [S:40/100000] [Train Loss:1.591269 Acc:0.503300] [Val Loss:1.525412 Acc:0.522100 5221/10000 (52%)] [Best Epoch:3 Loss:1.525412 Acc:0.522100] [Best Step:40 Loss:1.536687 Acc:0.515200] [20.08s 80.2s] Epoch status
[E:4/100] [S:45/100000] [Train Loss:1.528413 Acc:0.527000 5/10 (50%)] [Val Loss:1.474179 Acc:0.529500 5295/10000 (53%)] [Best Epoch:4 Loss:1.474179 Acc:0.529500] [Best Step:45 Loss:1.474179 Acc:0.529500] Step status
[E:4/100] [S:50/100000] [Train Loss:1.515184 Acc:0.549000 10/10 (100%)] [Val Loss:1.465506 Acc:0.539800 5398/10000 (54%)] [Best Epoch:4 Loss:1.465506 Acc:0.539800] [Best Step:50 Loss:1.465506 Acc:0.539800] Step status
[E:4/100] [S:50/100000] [Train Loss:1.519756 Acc:0.529800] [Val Loss:1.479681 Acc:0.537800 5378/10000 (54%)] [Best Epoch:4 Loss:1.465506 Acc:0.539800] [Best Step:50 Loss:1.465506 Acc:0.539800] [20.18s 100.4s] Epoch status
[E:5/100] [S:55/100000] [Train Loss:1.443934 Acc:0.542000 5/10 (50%)] [Val Loss:1.405279 Acc:0.567200 5672/10000 (57%)] [Best Epoch:5 Loss:1.405279 Acc:0.567200] [Best Step:55 Loss:1.405279 Acc:0.567200] Step status
[E:5/100] [S:60/100000] [Train Loss:1.416611 Acc:0.557000 10/10 (100%)] [Val Loss:1.416860 Acc:0.547900 5479/10000 (55%)] [Best Epoch:5 Loss:1.405279 Acc:0.567200] [Best Step:55 Loss:1.405279 Acc:0.567200] Step status
[E:5/100] [S:60/100000] [Train Loss:1.443520 Acc:0.548000] [Val Loss:1.418511 Acc:0.554200 5542/10000 (55%)] [Best Epoch:5 Loss:1.405279 Acc:0.567200] [Best Step:55 Loss:1.405279 Acc:0.567200] [20.02s 120.4s] Epoch status
[E:6/100] [S:65/100000] [Train Loss:1.392056 Acc:0.568000 5/10 (50%)] [Val Loss:1.381336 Acc:0.572200 5722/10000 (57%)] [Best Epoch:6 Loss:1.381336 Acc:0.572200] [Best Step:65 Loss:1.381336 Acc:0.572200] Step status
[E:6/100] [S:70/100000] [Train Loss:1.374859 Acc:0.584000 10/10 (100%)] [Val Loss:1.374447 Acc:0.576600 5766/10000 (58%)] [Best Epoch:6 Loss:1.374447 Acc:0.576600] [Best Step:70 Loss:1.374447 Acc:0.576600] Step status
[E:6/100] [S:70/100000] [Train Loss:1.380793 Acc:0.565800] [Val Loss:1.424537 Acc:0.554400 5544/10000 (55%)] [Best Epoch:6 Loss:1.374447 Acc:0.576600] [Best Step:70 Loss:1.374447 Acc:0.576600] [20.12s 140.5s] Epoch status
[E:7/100] [S:75/100000] [Train Loss:1.355937 Acc:0.574000 5/10 (50%)] [Val Loss:1.365398 Acc:0.563200 5632/10000 (56%)] [Best Epoch:7 Loss:1.365398 Acc:0.563200] [Best Step:75 Loss:1.365398 Acc:0.563200] Step status
[E:7/100] [S:80/100000] [Train Loss:1.389207 Acc:0.564000 10/10 (100%)] [Val Loss:1.312955 Acc:0.599700 5997/10000 (60%)] [Best Epoch:7 Loss:1.312955 Acc:0.599700] [Best Step:80 Loss:1.312955 Acc:0.599700] Step status
[E:7/100] [S:80/100000] [Train Loss:1.353517 Acc:0.577600] [Val Loss:1.326047 Acc:0.598400 5984/10000 (60%)] [Best Epoch:7 Loss:1.312955 Acc:0.599700] [Best Step:80 Loss:1.312955 Acc:0.599700] [20.13s 160.6s] Epoch status
[E:8/100] [S:85/100000] [Train Loss:1.317934 Acc:0.577000 5/10 (50%)] [Val Loss:1.294633 Acc:0.584200 5842/10000 (58%)] [Best Epoch:8 Loss:1.294633 Acc:0.584200] [Best Step:85 Loss:1.294633 Acc:0.584200] Step status
[E:8/100] [S:90/100000] [Train Loss:1.259528 Acc:0.608000 10/10 (100%)] [Val Loss:1.308782 Acc:0.591000 5910/10000 (59%)] [Best Epoch:8 Loss:1.294633 Acc:0.584200] [Best Step:85 Loss:1.294633 Acc:0.584200] Step status
[E:8/100] [S:90/100000] [Train Loss:1.311848 Acc:0.588400] [Val Loss:1.288862 Acc:0.592200 5922/10000 (59%)] [Best Epoch:8 Loss:1.288862 Acc:0.592200] [Best Step:85 Loss:1.294633 Acc:0.584200] [19.98s 180.6s] Epoch status
[E:9/100] [S:95/100000] [Train Loss:1.300831 Acc:0.582000 5/10 (50%)] [Val Loss:1.300572 Acc:0.584600 5846/10000 (58%)] [Best Epoch:8 Loss:1.288862 Acc:0.592200] [Best Step:85 Loss:1.294633 Acc:0.584200] Step status
[E:9/100] [S:100/100000] [Train Loss:1.311145 Acc:0.587000 10/10 (100%)] [Val Loss:1.231630 Acc:0.611800 6118/10000 (61%)] [Best Epoch:9 Loss:1.231630 Acc:0.611800] [Best Step:100 Loss:1.231630 Acc:0.611800] Step status
[E:9/100] [S:100/100000] [Train Loss:1.285119 Acc:0.597900] [Val Loss:1.252770 Acc:0.601000 6010/10000 (60%)] [Best Epoch:9 Loss:1.231630 Acc:0.611800] [Best Step:100 Loss:1.231630 Acc:0.611800] [19.71s 200.3s] Epoch status
[E:10/100] [S:105/100000] [Train Loss:1.302027 Acc:0.566000 5/10 (50%)] [Val Loss:1.285808 Acc:0.595400 5954/10000 (60%)] [Best Epoch:9 Loss:1.231630 Acc:0.611800] [Best Step:100 Loss:1.231630 Acc:0.611800] Step status
[E:10/100] [S:110/100000] [Train Loss:1.260846 Acc:0.608000 10/10 (100%)] [Val Loss:1.253798 Acc:0.591300 5913/10000 (59%)] [Best Epoch:9 Loss:1.231630 Acc:0.611800] [Best Step:100 Loss:1.231630 Acc:0.611800] Step status
[E:10/100] [S:110/100000] [Train Loss:1.278418 Acc:0.591100] [Val Loss:1.256654 Acc:0.595400 5954/10000 (60%)] [Best Epoch:9 Loss:1.231630 Acc:0.611800] [Best Step:100 Loss:1.231630 Acc:0.611800] [19.90s 220.2s] Epoch status
[E:11/100] [S:115/100000] [Train Loss:1.240201 Acc:0.606000 5/10 (50%)] [Val Loss:1.228229 Acc:0.612500 6125/10000 (61%)] [Best Epoch:11 Loss:1.228229 Acc:0.612500] [Best Step:115 Loss:1.228229 Acc:0.612500] Step status
[E:11/100] [S:120/100000] [Train Loss:1.282284 Acc:0.578000 10/10 (100%)] [Val Loss:1.214430 Acc:0.612600 6126/10000 (61%)] [Best Epoch:11 Loss:1.214430 Acc:0.612600] [Best Step:120 Loss:1.214430 Acc:0.612600] Step status
[E:11/100] [S:120/100000] [Train Loss:1.256272 Acc:0.594700] [Val Loss:1.212468 Acc:0.614300 6143/10000 (61%)] [Best Epoch:11 Loss:1.212468 Acc:0.614300] [Best Step:120 Loss:1.214430 Acc:0.612600] [19.86s 240.1s] Epoch status
[E:12/100] [S:125/100000] [Train Loss:1.264794 Acc:0.628000 5/10 (50%)] [Val Loss:1.241468 Acc:0.594700 5947/10000 (59%)] [Best Epoch:11 Loss:1.212468 Acc:0.614300] [Best Step:120 Loss:1.214430 Acc:0.612600] Step status
[E:12/100] [S:130/100000] [Train Loss:1.244773 Acc:0.613000 10/10 (100%)] [Val Loss:1.184135 Acc:0.624400 6244/10000 (62%)] [Best Epoch:12 Loss:1.184135 Acc:0.624400] [Best Step:130 Loss:1.184135 Acc:0.624400] Step status
[E:12/100] [S:130/100000] [Train Loss:1.231683 Acc:0.604600] [Val Loss:1.202855 Acc:0.614500 6145/10000 (61%)] [Best Epoch:12 Loss:1.184135 Acc:0.624400] [Best Step:130 Loss:1.184135 Acc:0.624400] [19.93s 260.0s] Epoch status
[E:13/100] [S:135/100000] [Train Loss:1.240437 Acc:0.599000 5/10 (50%)] [Val Loss:1.327287 Acc:0.576700 5767/10000 (58%)] [Best Epoch:12 Loss:1.184135 Acc:0.624400] [Best Step:130 Loss:1.184135 Acc:0.624400] Step status
[E:13/100] [S:140/100000] [Train Loss:1.255932 Acc:0.605000 10/10 (100%)] [Val Loss:1.245981 Acc:0.594800 5948/10000 (59%)] [Best Epoch:12 Loss:1.184135 Acc:0.624400] [Best Step:130 Loss:1.184135 Acc:0.624400] Step status
[E:13/100] [S:140/100000] [Train Loss:1.230941 Acc:0.605700] [Val Loss:1.230431 Acc:0.601700 6017/10000 (60%)] [Best Epoch:12 Loss:1.184135 Acc:0.624400] [Best Step:130 Loss:1.184135 Acc:0.624400] [19.86s 279.9s] Epoch status
[E:14/100] [S:145/100000] [Train Loss:1.206273 Acc:0.606000 5/10 (50%)] [Val Loss:1.161500 Acc:0.621800 6218/10000 (62%)] [Best Epoch:14 Loss:1.161500 Acc:0.621800] [Best Step:145 Loss:1.161500 Acc:0.621800] Step status
[E:14/100] [S:150/100000] [Train Loss:1.242006 Acc:0.602000 10/10 (100%)] [Val Loss:1.281401 Acc:0.592700 5927/10000 (59%)] [Best Epoch:14 Loss:1.161500 Acc:0.621800] [Best Step:145 Loss:1.161500 Acc:0.621800] Step status
[E:14/100] [S:150/100000] [Train Loss:1.219782 Acc:0.606500] [Val Loss:1.310329 Acc:0.586100 5861/10000 (59%)] [Best Epoch:14 Loss:1.161500 Acc:0.621800] [Best Step:145 Loss:1.161500 Acc:0.621800] [19.85s 299.7s] Epoch status
[E:15/100] [S:155/100000] [Train Loss:1.221272 Acc:0.613000 5/10 (50%)] [Val Loss:1.212364 Acc:0.605000 6050/10000 (60%)] [Best Epoch:14 Loss:1.161500 Acc:0.621800] [Best Step:145 Loss:1.161500 Acc:0.621800] Step status
[E:15/100] [S:160/100000] [Train Loss:1.225789 Acc:0.615000 10/10 (100%)] [Val Loss:1.148690 Acc:0.626800 6268/10000 (63%)] [Best Epoch:15 Loss:1.148690 Acc:0.626800] [Best Step:160 Loss:1.148690 Acc:0.626800] Step status
[E:15/100] [S:160/100000] [Train Loss:1.216946 Acc:0.613700] [Val Loss:1.166461 Acc:0.618800 6188/10000 (62%)] [Best Epoch:15 Loss:1.148690 Acc:0.626800] [Best Step:160 Loss:1.148690 Acc:0.626800] [19.75s 319.5s] Epoch status
[E:16/100] [S:165/100000] [Train Loss:1.241960 Acc:0.609000 5/10 (50%)] [Val Loss:1.161576 Acc:0.620700 6207/10000 (62%)] [Best Epoch:15 Loss:1.148690 Acc:0.626800] [Best Step:160 Loss:1.148690 Acc:0.626800] Step status
[E:16/100] [S:170/100000] [Train Loss:1.167988 Acc:0.628000 10/10 (100%)] [Val Loss:1.219579 Acc:0.593700 5937/10000 (59%)] [Best Epoch:15 Loss:1.148690 Acc:0.626800] [Best Step:160 Loss:1.148690 Acc:0.626800] Step status
[E:16/100] [S:170/100000] [Train Loss:1.191819 Acc:0.611600] [Val Loss:1.228102 Acc:0.600900 6009/10000 (60%)] [Best Epoch:15 Loss:1.148690 Acc:0.626800] [Best Step:160 Loss:1.148690 Acc:0.626800] [19.75s 339.2s] Epoch status
[E:17/100] [S:175/100000] [Train Loss:1.131161 Acc:0.636000 5/10 (50%)] [Val Loss:1.201044 Acc:0.601400 6014/10000 (60%)] [Best Epoch:15 Loss:1.148690 Acc:0.626800] [Best Step:160 Loss:1.148690 Acc:0.626800] Step status
[E:17/100] [S:180/100000] [Train Loss:1.235602 Acc:0.609000 10/10 (100%)] [Val Loss:1.156403 Acc:0.634700 6347/10000 (63%)] [Best Epoch:15 Loss:1.148690 Acc:0.626800] [Best Step:160 Loss:1.148690 Acc:0.626800] Step status
[E:17/100] [S:180/100000] [Train Loss:1.172539 Acc:0.619400] [Val Loss:1.146564 Acc:0.636500 6365/10000 (64%)] [Best Epoch:17 Loss:1.146564 Acc:0.636500] [Best Step:160 Loss:1.148690 Acc:0.626800] [19.78s 359.0s] Epoch status
[E:18/100] [S:185/100000] [Train Loss:1.189362 Acc:0.604000 5/10 (50%)] [Val Loss:1.200387 Acc:0.618100 6181/10000 (62%)] [Best Epoch:17 Loss:1.146564 Acc:0.636500] [Best Step:160 Loss:1.148690 Acc:0.626800] Step status
[E:18/100] [S:190/100000] [Train Loss:1.140376 Acc:0.631000 10/10 (100%)] [Val Loss:1.189003 Acc:0.612900 6129/10000 (61%)] [Best Epoch:17 Loss:1.146564 Acc:0.636500] [Best Step:160 Loss:1.148690 Acc:0.626800] Step status
[E:18/100] [S:190/100000] [Train Loss:1.180526 Acc:0.619500] [Val Loss:1.178285 Acc:0.619500 6195/10000 (62%)] [Best Epoch:17 Loss:1.146564 Acc:0.636500] [Best Step:160 Loss:1.148690 Acc:0.626800] [19.82s 378.8s] Epoch status
[E:19/100] [S:195/100000] [Train Loss:1.156811 Acc:0.628000 5/10 (50%)] [Val Loss:1.190560 Acc:0.600600 6006/10000 (60%)] [Best Epoch:17 Loss:1.146564 Acc:0.636500] [Best Step:160 Loss:1.148690 Acc:0.626800] Step status
[E:19/100] [S:200/100000] [Train Loss:1.145239 Acc:0.627000 10/10 (100%)] [Val Loss:1.205553 Acc:0.607200 6072/10000 (61%)] [Best Epoch:17 Loss:1.146564 Acc:0.636500] [Best Step:160 Loss:1.148690 Acc:0.626800] Step status
[E:19/100] [S:200/100000] [Train Loss:1.169514 Acc:0.619400] [Val Loss:1.217982 Acc:0.609900 6099/10000 (61%)] [Best Epoch:17 Loss:1.146564 Acc:0.636500] [Best Step:160 Loss:1.148690 Acc:0.626800] [19.76s 398.6s] Epoch status
[E:20/100] [S:205/100000] [Train Loss:1.170920 Acc:0.617000 5/10 (50%)] [Val Loss:1.154789 Acc:0.621200 6212/10000 (62%)] [Best Epoch:17 Loss:1.146564 Acc:0.636500] [Best Step:160 Loss:1.148690 Acc:0.626800] Step status
[E:20/100] [S:210/100000] [Train Loss:1.156138 Acc:0.632000 10/10 (100%)] [Val Loss:1.105138 Acc:0.643600 6436/10000 (64%)] [Best Epoch:20 Loss:1.105138 Acc:0.643600] [Best Step:210 Loss:1.105138 Acc:0.643600] Step status
[E:20/100] [S:210/100000] [Train Loss:1.161057 Acc:0.623100] [Val Loss:1.114177 Acc:0.640100 6401/10000 (64%)] [Best Epoch:20 Loss:1.105138 Acc:0.643600] [Best Step:210 Loss:1.105138 Acc:0.643600] [19.82s 418.4s] Epoch status
[E:21/100] [S:215/100000] [Train Loss:1.143365 Acc:0.637000 5/10 (50%)] [Val Loss:1.164103 Acc:0.620300 6203/10000 (62%)] [Best Epoch:20 Loss:1.105138 Acc:0.643600] [Best Step:210 Loss:1.105138 Acc:0.643600] Step status
[E:21/100] [S:220/100000] [Train Loss:1.127164 Acc:0.638000 10/10 (100%)] [Val Loss:1.134541 Acc:0.625900 6259/10000 (63%)] [Best Epoch:20 Loss:1.105138 Acc:0.643600] [Best Step:210 Loss:1.105138 Acc:0.643600] Step status
[E:21/100] [S:220/100000] [Train Loss:1.149521 Acc:0.629100] [Val Loss:1.159968 Acc:0.618100 6181/10000 (62%)] [Best Epoch:20 Loss:1.105138 Acc:0.643600] [Best Step:210 Loss:1.105138 Acc:0.643600] [19.93s 438.3s] Epoch status
[E:22/100] [S:225/100000] [Train Loss:1.146146 Acc:0.633000 5/10 (50%)] [Val Loss:1.139251 Acc:0.628400 6284/10000 (63%)] [Best Epoch:20 Loss:1.105138 Acc:0.643600] [Best Step:210 Loss:1.105138 Acc:0.643600] Step status
[E:22/100] [S:230/100000] [Train Loss:1.133274 Acc:0.632000 10/10 (100%)] [Val Loss:1.113613 Acc:0.637000 6370/10000 (64%)] [Best Epoch:20 Loss:1.105138 Acc:0.643600] [Best Step:210 Loss:1.105138 Acc:0.643600] Step status
[E:22/100] [S:230/100000] [Train Loss:1.153472 Acc:0.621500] [Val Loss:1.129085 Acc:0.635300 6353/10000 (64%)] [Best Epoch:20 Loss:1.105138 Acc:0.643600] [Best Step:210 Loss:1.105138 Acc:0.643600] [19.59s 457.9s] Epoch status
[E:23/100] [S:235/100000] [Train Loss:1.142108 Acc:0.650000 5/10 (50%)] [Val Loss:1.246087 Acc:0.591200 5912/10000 (59%)] [Best Epoch:20 Loss:1.105138 Acc:0.643600] [Best Step:210 Loss:1.105138 Acc:0.643600] Step status
[E:23/100] [S:240/100000] [Train Loss:1.181328 Acc:0.612000 10/10 (100%)] [Val Loss:1.121277 Acc:0.633000 6330/10000 (63%)] [Best Epoch:20 Loss:1.105138 Acc:0.643600] [Best Step:210 Loss:1.105138 Acc:0.643600] Step status
[E:23/100] [S:240/100000] [Train Loss:1.171542 Acc:0.619100] [Val Loss:1.106423 Acc:0.641400 6414/10000 (64%)] [Best Epoch:20 Loss:1.105138 Acc:0.643600] [Best Step:210 Loss:1.105138 Acc:0.643600] [19.99s 477.9s] Epoch status
[E:24/100] [S:245/100000] [Train Loss:1.238899 Acc:0.601000 5/10 (50%)] [Val Loss:1.130263 Acc:0.628700 6287/10000 (63%)] [Best Epoch:20 Loss:1.105138 Acc:0.643600] [Best Step:210 Loss:1.105138 Acc:0.643600] Step status
[E:24/100] [S:250/100000] [Train Loss:1.120919 Acc:0.635000 10/10 (100%)] [Val Loss:1.158685 Acc:0.616700 6167/10000 (62%)] [Best Epoch:20 Loss:1.105138 Acc:0.643600] [Best Step:210 Loss:1.105138 Acc:0.643600] Step status
[E:24/100] [S:250/100000] [Train Loss:1.159118 Acc:0.620700] [Val Loss:1.142953 Acc:0.622400 6224/10000 (62%)] [Best Epoch:20 Loss:1.105138 Acc:0.643600] [Best Step:210 Loss:1.105138 Acc:0.643600] [19.96s 497.9s] Epoch status
[E:25/100] [S:255/100000] [Train Loss:1.177418 Acc:0.625000 5/10 (50%)] [Val Loss:1.182119 Acc:0.618600 6186/10000 (62%)] [Best Epoch:20 Loss:1.105138 Acc:0.643600] [Best Step:210 Loss:1.105138 Acc:0.643600] Step status
[E:25/100] [S:260/100000] [Train Loss:1.142253 Acc:0.625000 10/10 (100%)] [Val Loss:1.105325 Acc:0.635500 6355/10000 (64%)] [Best Epoch:20 Loss:1.105138 Acc:0.643600] [Best Step:210 Loss:1.105138 Acc:0.643600] Step status
[E:25/100] [S:260/100000] [Train Loss:1.147700 Acc:0.623400] [Val Loss:1.116359 Acc:0.630700 6307/10000 (63%)] [Best Epoch:20 Loss:1.105138 Acc:0.643600] [Best Step:210 Loss:1.105138 Acc:0.643600] [19.89s 517.8s] Epoch status
[E:26/100] [S:265/100000] [Train Loss:1.134531 Acc:0.611000 5/10 (50%)] [Val Loss:1.134069 Acc:0.622400 6224/10000 (62%)] [Best Epoch:20 Loss:1.105138 Acc:0.643600] [Best Step:210 Loss:1.105138 Acc:0.643600] Step status
[E:26/100] [S:270/100000] [Train Loss:1.076760 Acc:0.637000 10/10 (100%)] [Val Loss:1.150421 Acc:0.622800 6228/10000 (62%)] [Best Epoch:20 Loss:1.105138 Acc:0.643600] [Best Step:210 Loss:1.105138 Acc:0.643600] Step status
[E:26/100] [S:270/100000] [Train Loss:1.128681 Acc:0.624300] [Val Loss:1.154195 Acc:0.615900 6159/10000 (62%)] [Best Epoch:20 Loss:1.105138 Acc:0.643600] [Best Step:210 Loss:1.105138 Acc:0.643600] [19.96s 537.7s] Epoch status
[E:27/100] [S:275/100000] [Train Loss:1.096999 Acc:0.638000 5/10 (50%)] [Val Loss:1.140677 Acc:0.627900 6279/10000 (63%)] [Best Epoch:20 Loss:1.105138 Acc:0.643600] [Best Step:210 Loss:1.105138 Acc:0.643600] Step status
[E:27/100] [S:280/100000] [Train Loss:1.124923 Acc:0.629000 10/10 (100%)] [Val Loss:1.075512 Acc:0.651800 6518/10000 (65%)] [Best Epoch:27 Loss:1.075512 Acc:0.651800] [Best Step:280 Loss:1.075512 Acc:0.651800] Step status
[E:27/100] [S:280/100000] [Train Loss:1.123663 Acc:0.633600] [Val Loss:1.086114 Acc:0.647200 6472/10000 (65%)] [Best Epoch:27 Loss:1.075512 Acc:0.651800] [Best Step:280 Loss:1.075512 Acc:0.651800] [19.80s 557.5s] Epoch status
[E:28/100] [S:285/100000] [Train Loss:1.151544 Acc:0.629000 5/10 (50%)] [Val Loss:1.138781 Acc:0.639000 6390/10000 (64%)] [Best Epoch:27 Loss:1.075512 Acc:0.651800] [Best Step:280 Loss:1.075512 Acc:0.651800] Step status
[E:28/100] [S:290/100000] [Train Loss:1.106984 Acc:0.641000 10/10 (100%)] [Val Loss:1.122386 Acc:0.636000 6360/10000 (64%)] [Best Epoch:27 Loss:1.075512 Acc:0.651800] [Best Step:280 Loss:1.075512 Acc:0.651800] Step status
[E:28/100] [S:290/100000] [Train Loss:1.128913 Acc:0.630800] [Val Loss:1.114227 Acc:0.640200 6402/10000 (64%)] [Best Epoch:27 Loss:1.075512 Acc:0.651800] [Best Step:280 Loss:1.075512 Acc:0.651800] [19.53s 577.1s] Epoch status
[E:29/100] [S:295/100000] [Train Loss:1.129568 Acc:0.651000 5/10 (50%)] [Val Loss:1.164874 Acc:0.613700 6137/10000 (61%)] [Best Epoch:27 Loss:1.075512 Acc:0.651800] [Best Step:280 Loss:1.075512 Acc:0.651800] Step status
[E:29/100] [S:300/100000] [Train Loss:1.057530 Acc:0.664000 10/10 (100%)] [Val Loss:1.081106 Acc:0.645300 6453/10000 (65%)] [Best Epoch:27 Loss:1.075512 Acc:0.651800] [Best Step:280 Loss:1.075512 Acc:0.651800] Step status
[E:29/100] [S:300/100000] [Train Loss:1.161124 Acc:0.625000] [Val Loss:1.078104 Acc:0.649000 6490/10000 (65%)] [Best Epoch:27 Loss:1.075512 Acc:0.651800] [Best Step:280 Loss:1.075512 Acc:0.651800] [19.96s 597.0s] Epoch status
[E:30/100] [S:305/100000] [Train Loss:1.089188 Acc:0.642000 5/10 (50%)] [Val Loss:1.130409 Acc:0.635200 6352/10000 (64%)] [Best Epoch:27 Loss:1.075512 Acc:0.651800] [Best Step:280 Loss:1.075512 Acc:0.651800] Step status
[E:30/100] [S:310/100000] [Train Loss:1.115420 Acc:0.633000 10/10 (100%)] [Val Loss:1.116789 Acc:0.634800 6348/10000 (63%)] [Best Epoch:27 Loss:1.075512 Acc:0.651800] [Best Step:280 Loss:1.075512 Acc:0.651800] Step status
[E:30/100] [S:310/100000] [Train Loss:1.128207 Acc:0.631000] [Val Loss:1.107183 Acc:0.638700 6387/10000 (64%)] [Best Epoch:27 Loss:1.075512 Acc:0.651800] [Best Step:280 Loss:1.075512 Acc:0.651800] [19.90s 616.9s] Epoch status
[E:31/100] [S:315/100000] [Train Loss:1.102926 Acc:0.637000 5/10 (50%)] [Val Loss:1.183165 Acc:0.608800 6088/10000 (61%)] [Best Epoch:27 Loss:1.075512 Acc:0.651800] [Best Step:280 Loss:1.075512 Acc:0.651800] Step status
[E:31/100] [S:320/100000] [Train Loss:1.105992 Acc:0.649000 10/10 (100%)] [Val Loss:1.115085 Acc:0.640100 6401/10000 (64%)] [Best Epoch:27 Loss:1.075512 Acc:0.651800] [Best Step:280 Loss:1.075512 Acc:0.651800] Step status
[E:31/100] [S:320/100000] [Train Loss:1.102957 Acc:0.641400] [Val Loss:1.088188 Acc:0.643500 6435/10000 (64%)] [Best Epoch:27 Loss:1.075512 Acc:0.651800] [Best Step:280 Loss:1.075512 Acc:0.651800] [19.53s 636.4s] Epoch status
[E:32/100] [S:325/100000] [Train Loss:1.122513 Acc:0.634000 5/10 (50%)] [Val Loss:1.099696 Acc:0.641100 6411/10000 (64%)] [Best Epoch:27 Loss:1.075512 Acc:0.651800] [Best Step:280 Loss:1.075512 Acc:0.651800] Step status
[E:32/100] [S:330/100000] [Train Loss:1.160188 Acc:0.614000 10/10 (100%)] [Val Loss:1.097308 Acc:0.638800 6388/10000 (64%)] [Best Epoch:27 Loss:1.075512 Acc:0.651800] [Best Step:280 Loss:1.075512 Acc:0.651800] Step status
[E:32/100] [S:330/100000] [Train Loss:1.128807 Acc:0.634400] [Val Loss:1.097507 Acc:0.640900 6409/10000 (64%)] [Best Epoch:27 Loss:1.075512 Acc:0.651800] [Best Step:280 Loss:1.075512 Acc:0.651800] [19.99s 656.4s] Epoch status
[E:33/100] [S:335/100000] [Train Loss:1.166533 Acc:0.630000 5/10 (50%)] [Val Loss:1.074612 Acc:0.655600 6556/10000 (66%)] [Best Epoch:33 Loss:1.074612 Acc:0.655600] [Best Step:335 Loss:1.074612 Acc:0.655600] Step status
[E:33/100] [S:340/100000] [Train Loss:1.188134 Acc:0.613000 10/10 (100%)] [Val Loss:1.069681 Acc:0.659700 6597/10000 (66%)] [Best Epoch:33 Loss:1.069681 Acc:0.659700] [Best Step:340 Loss:1.069681 Acc:0.659700] Step status
[E:33/100] [S:340/100000] [Train Loss:1.135677 Acc:0.629400] [Val Loss:1.086565 Acc:0.651500 6515/10000 (65%)] [Best Epoch:33 Loss:1.069681 Acc:0.659700] [Best Step:340 Loss:1.069681 Acc:0.659700] [20.00s 676.4s] Epoch status
[E:34/100] [S:345/100000] [Train Loss:1.106099 Acc:0.620000 5/10 (50%)] [Val Loss:1.108088 Acc:0.641300 6413/10000 (64%)] [Best Epoch:33 Loss:1.069681 Acc:0.659700] [Best Step:340 Loss:1.069681 Acc:0.659700] Step status
[E:34/100] [S:350/100000] [Train Loss:1.149848 Acc:0.612000 10/10 (100%)] [Val Loss:1.105935 Acc:0.628400 6284/10000 (63%)] [Best Epoch:33 Loss:1.069681 Acc:0.659700] [Best Step:340 Loss:1.069681 Acc:0.659700] Step status
[E:34/100] [S:350/100000] [Train Loss:1.108683 Acc:0.636100] [Val Loss:1.109058 Acc:0.627100 6271/10000 (63%)] [Best Epoch:33 Loss:1.069681 Acc:0.659700] [Best Step:340 Loss:1.069681 Acc:0.659700] [20.05s 696.5s] Epoch status
[E:35/100] [S:355/100000] [Train Loss:1.210073 Acc:0.609000 5/10 (50%)] [Val Loss:1.135575 Acc:0.622600 6226/10000 (62%)] [Best Epoch:33 Loss:1.069681 Acc:0.659700] [Best Step:340 Loss:1.069681 Acc:0.659700] Step status
[E:35/100] [S:360/100000] [Train Loss:1.120926 Acc:0.634000 10/10 (100%)] [Val Loss:1.109824 Acc:0.635400 6354/10000 (64%)] [Best Epoch:33 Loss:1.069681 Acc:0.659700] [Best Step:340 Loss:1.069681 Acc:0.659700] Step status
[E:35/100] [S:360/100000] [Train Loss:1.141297 Acc:0.623200] [Val Loss:1.131826 Acc:0.635000 6350/10000 (64%)] [Best Epoch:33 Loss:1.069681 Acc:0.659700] [Best Step:340 Loss:1.069681 Acc:0.659700] [20.08s 716.6s] Epoch status
[E:36/100] [S:365/100000] [Train Loss:1.154690 Acc:0.623000 5/10 (50%)] [Val Loss:1.109297 Acc:0.628900 6289/10000 (63%)] [Best Epoch:33 Loss:1.069681 Acc:0.659700] [Best Step:340 Loss:1.069681 Acc:0.659700] Step status
[E:36/100] [S:370/100000] [Train Loss:1.111283 Acc:0.657000 10/10 (100%)] [Val Loss:1.120579 Acc:0.631200 6312/10000 (63%)] [Best Epoch:33 Loss:1.069681 Acc:0.659700] [Best Step:340 Loss:1.069681 Acc:0.659700] Step status
[E:36/100] [S:370/100000] [Train Loss:1.114520 Acc:0.638000] [Val Loss:1.121302 Acc:0.628200 6282/10000 (63%)] [Best Epoch:33 Loss:1.069681 Acc:0.659700] [Best Step:340 Loss:1.069681 Acc:0.659700] [20.00s 736.6s] Epoch status
[E:37/100] [S:375/100000] [Train Loss:1.078494 Acc:0.658000 5/10 (50%)] [Val Loss:1.142237 Acc:0.628500 6285/10000 (63%)] [Best Epoch:33 Loss:1.069681 Acc:0.659700] [Best Step:340 Loss:1.069681 Acc:0.659700] Step status
[E:37/100] [S:380/100000] [Train Loss:1.076631 Acc:0.648000 10/10 (100%)] [Val Loss:1.098411 Acc:0.637100 6371/10000 (64%)] [Best Epoch:33 Loss:1.069681 Acc:0.659700] [Best Step:340 Loss:1.069681 Acc:0.659700] Step status
[E:37/100] [S:380/100000] [Train Loss:1.108114 Acc:0.644600] [Val Loss:1.108840 Acc:0.633400 6334/10000 (63%)] [Best Epoch:33 Loss:1.069681 Acc:0.659700] [Best Step:340 Loss:1.069681 Acc:0.659700] [20.12s 756.7s] Epoch status
[E:38/100] [S:385/100000] [Train Loss:1.084809 Acc:0.657000 5/10 (50%)] [Val Loss:1.110116 Acc:0.633300 6333/10000 (63%)] [Best Epoch:33 Loss:1.069681 Acc:0.659700] [Best Step:340 Loss:1.069681 Acc:0.659700] Step status
[E:38/100] [S:390/100000] [Train Loss:1.058614 Acc:0.654000 10/10 (100%)] [Val Loss:1.080049 Acc:0.654100 6541/10000 (65%)] [Best Epoch:33 Loss:1.069681 Acc:0.659700] [Best Step:340 Loss:1.069681 Acc:0.659700] Step status
[E:38/100] [S:390/100000] [Train Loss:1.076580 Acc:0.645200] [Val Loss:1.110053 Acc:0.643600 6436/10000 (64%)] [Best Epoch:33 Loss:1.069681 Acc:0.659700] [Best Step:340 Loss:1.069681 Acc:0.659700] [19.95s 776.6s] Epoch status
[E:39/100] [S:395/100000] [Train Loss:1.120218 Acc:0.644000 5/10 (50%)] [Val Loss:1.120146 Acc:0.634300 6343/10000 (63%)] [Best Epoch:33 Loss:1.069681 Acc:0.659700] [Best Step:340 Loss:1.069681 Acc:0.659700] Step status
[E:39/100] [S:400/100000] [Train Loss:1.087189 Acc:0.656000 10/10 (100%)] [Val Loss:1.102170 Acc:0.640800 6408/10000 (64%)] [Best Epoch:33 Loss:1.069681 Acc:0.659700] [Best Step:340 Loss:1.069681 Acc:0.659700] Step status
[E:39/100] [S:400/100000] [Train Loss:1.108199 Acc:0.641600] [Val Loss:1.093920 Acc:0.645600 6456/10000 (65%)] [Best Epoch:33 Loss:1.069681 Acc:0.659700] [Best Step:340 Loss:1.069681 Acc:0.659700] [20.07s 796.7s] Epoch status
[E:40/100] [S:405/100000] [Train Loss:1.132214 Acc:0.636000 5/10 (50%)] [Val Loss:1.128390 Acc:0.636100 6361/10000 (64%)] [Best Epoch:33 Loss:1.069681 Acc:0.659700] [Best Step:340 Loss:1.069681 Acc:0.659700] Step status
[E:40/100] [S:410/100000] [Train Loss:1.168948 Acc:0.627000 10/10 (100%)] [Val Loss:1.159214 Acc:0.621200 6212/10000 (62%)] [Best Epoch:33 Loss:1.069681 Acc:0.659700] [Best Step:340 Loss:1.069681 Acc:0.659700] Step status
[E:40/100] [S:410/100000] [Train Loss:1.101381 Acc:0.644900] [Val Loss:1.136318 Acc:0.620300 6203/10000 (62%)] [Best Epoch:33 Loss:1.069681 Acc:0.659700] [Best Step:340 Loss:1.069681 Acc:0.659700] [20.17s 816.9s] Epoch status
[E:41/100] [S:415/100000] [Train Loss:1.071941 Acc:0.644000 5/10 (50%)] [Val Loss:1.108393 Acc:0.637600 6376/10000 (64%)] [Best Epoch:33 Loss:1.069681 Acc:0.659700] [Best Step:340 Loss:1.069681 Acc:0.659700] Step status
[E:41/100] [S:420/100000] [Train Loss:1.012549 Acc:0.677000 10/10 (100%)] [Val Loss:1.095745 Acc:0.648800 6488/10000 (65%)] [Best Epoch:33 Loss:1.069681 Acc:0.659700] [Best Step:340 Loss:1.069681 Acc:0.659700] Step status
[E:41/100] [S:420/100000] [Train Loss:1.089551 Acc:0.639700] [Val Loss:1.078069 Acc:0.640500 6405/10000 (64%)] [Best Epoch:33 Loss:1.069681 Acc:0.659700] [Best Step:340 Loss:1.069681 Acc:0.659700] [20.29s 837.2s] Epoch status
[E:42/100] [S:425/100000] [Train Loss:1.077498 Acc:0.646000 5/10 (50%)] [Val Loss:1.101282 Acc:0.638700 6387/10000 (64%)] [Best Epoch:33 Loss:1.069681 Acc:0.659700] [Best Step:340 Loss:1.069681 Acc:0.659700] Step status
[E:42/100] [S:430/100000] [Train Loss:1.049053 Acc:0.662000 10/10 (100%)] [Val Loss:1.118357 Acc:0.630000 6300/10000 (63%)] [Best Epoch:33 Loss:1.069681 Acc:0.659700] [Best Step:340 Loss:1.069681 Acc:0.659700] Step status
[E:42/100] [S:430/100000] [Train Loss:1.137134 Acc:0.628800] [Val Loss:1.118225 Acc:0.633800 6338/10000 (63%)] [Best Epoch:33 Loss:1.069681 Acc:0.659700] [Best Step:340 Loss:1.069681 Acc:0.659700] [20.08s 857.3s] Epoch status
[E:43/100] [S:435/100000] [Train Loss:1.164805 Acc:0.607000 5/10 (50%)] [Val Loss:1.086441 Acc:0.634100 6341/10000 (63%)] [Best Epoch:33 Loss:1.069681 Acc:0.659700] [Best Step:340 Loss:1.069681 Acc:0.659700] Step status
[E:43/100] [S:440/100000] [Train Loss:1.119229 Acc:0.634000 10/10 (100%)] [Val Loss:1.066756 Acc:0.655100 6551/10000 (66%)] [Best Epoch:43 Loss:1.066756 Acc:0.655100] [Best Step:440 Loss:1.066756 Acc:0.655100] Step status
[E:43/100] [S:440/100000] [Train Loss:1.120700 Acc:0.632700] [Val Loss:1.063598 Acc:0.650300 6503/10000 (65%)] [Best Epoch:43 Loss:1.063598 Acc:0.650300] [Best Step:440 Loss:1.066756 Acc:0.655100] [20.09s 877.4s] Epoch status
[E:44/100] [S:445/100000] [Train Loss:1.144864 Acc:0.616000 5/10 (50%)] [Val Loss:1.105061 Acc:0.637000 6370/10000 (64%)] [Best Epoch:43 Loss:1.063598 Acc:0.650300] [Best Step:440 Loss:1.066756 Acc:0.655100] Step status
[E:44/100] [S:450/100000] [Train Loss:1.078246 Acc:0.648000 10/10 (100%)] [Val Loss:1.094258 Acc:0.638100 6381/10000 (64%)] [Best Epoch:43 Loss:1.063598 Acc:0.650300] [Best Step:440 Loss:1.066756 Acc:0.655100] Step status
[E:44/100] [S:450/100000] [Train Loss:1.109884 Acc:0.631800] [Val Loss:1.109701 Acc:0.636700 6367/10000 (64%)] [Best Epoch:43 Loss:1.063598 Acc:0.650300] [Best Step:440 Loss:1.066756 Acc:0.655100] [20.07s 897.4s] Epoch status
[E:45/100] [S:455/100000] [Train Loss:1.106094 Acc:0.641000 5/10 (50%)] [Val Loss:1.129583 Acc:0.636800 6368/10000 (64%)] [Best Epoch:43 Loss:1.063598 Acc:0.650300] [Best Step:440 Loss:1.066756 Acc:0.655100] Step status
[E:45/100] [S:460/100000] [Train Loss:1.064615 Acc:0.640000 10/10 (100%)] [Val Loss:1.077839 Acc:0.642200 6422/10000 (64%)] [Best Epoch:43 Loss:1.063598 Acc:0.650300] [Best Step:440 Loss:1.066756 Acc:0.655100] Step status
[E:45/100] [S:460/100000] [Train Loss:1.114823 Acc:0.634400] [Val Loss:1.086220 Acc:0.638200 6382/10000 (64%)] [Best Epoch:43 Loss:1.063598 Acc:0.650300] [Best Step:440 Loss:1.066756 Acc:0.655100] [20.23s 917.6s] Epoch status
[E:46/100] [S:465/100000] [Train Loss:1.068665 Acc:0.657000 5/10 (50%)] [Val Loss:1.125552 Acc:0.630600 6306/10000 (63%)] [Best Epoch:43 Loss:1.063598 Acc:0.650300] [Best Step:440 Loss:1.066756 Acc:0.655100] Step status
[E:46/100] [S:470/100000] [Train Loss:1.091016 Acc:0.639000 10/10 (100%)] [Val Loss:1.077943 Acc:0.650200 6502/10000 (65%)] [Best Epoch:43 Loss:1.063598 Acc:0.650300] [Best Step:440 Loss:1.066756 Acc:0.655100] Step status
[E:46/100] [S:470/100000] [Train Loss:1.079658 Acc:0.643300] [Val Loss:1.064142 Acc:0.646300 6463/10000 (65%)] [Best Epoch:43 Loss:1.063598 Acc:0.650300] [Best Step:440 Loss:1.066756 Acc:0.655100] [20.21s 937.9s] Epoch status
[E:47/100] [S:475/100000] [Train Loss:1.121663 Acc:0.629000 5/10 (50%)] [Val Loss:1.081663 Acc:0.645500 6455/10000 (65%)] [Best Epoch:43 Loss:1.063598 Acc:0.650300] [Best Step:440 Loss:1.066756 Acc:0.655100] Step status
[E:47/100] [S:480/100000] [Train Loss:1.073009 Acc:0.650000 10/10 (100%)] [Val Loss:1.126041 Acc:0.641300 6413/10000 (64%)] [Best Epoch:43 Loss:1.063598 Acc:0.650300] [Best Step:440 Loss:1.066756 Acc:0.655100] Step status
[E:47/100] [S:480/100000] [Train Loss:1.106697 Acc:0.631300] [Val Loss:1.122543 Acc:0.636800 6368/10000 (64%)] [Best Epoch:43 Loss:1.063598 Acc:0.650300] [Best Step:440 Loss:1.066756 Acc:0.655100] [20.14s 958.0s] Epoch status
[E:48/100] [S:485/100000] [Train Loss:1.060541 Acc:0.645000 5/10 (50%)] [Val Loss:1.085254 Acc:0.636600 6366/10000 (64%)] [Best Epoch:43 Loss:1.063598 Acc:0.650300] [Best Step:440 Loss:1.066756 Acc:0.655100] Step status
[E:48/100] [S:490/100000] [Train Loss:1.043159 Acc:0.645000 10/10 (100%)] [Val Loss:1.059383 Acc:0.657000 6570/10000 (66%)] [Best Epoch:48 Loss:1.059383 Acc:0.657000] [Best Step:490 Loss:1.059383 Acc:0.657000] Step status
[E:48/100] [S:490/100000] [Train Loss:1.097869 Acc:0.634500] [Val Loss:1.081426 Acc:0.647400 6474/10000 (65%)] [Best Epoch:48 Loss:1.059383 Acc:0.657000] [Best Step:490 Loss:1.059383 Acc:0.657000] [19.92s 977.9s] Epoch status
[E:49/100] [S:495/100000] [Train Loss:1.116079 Acc:0.624000 5/10 (50%)] [Val Loss:1.094050 Acc:0.629400 6294/10000 (63%)] [Best Epoch:48 Loss:1.059383 Acc:0.657000] [Best Step:490 Loss:1.059383 Acc:0.657000] Step status
[E:49/100] [S:500/100000] [Train Loss:1.130248 Acc:0.634000 10/10 (100%)] [Val Loss:1.051219 Acc:0.662200 6622/10000 (66%)] [Best Epoch:49 Loss:1.051219 Acc:0.662200] [Best Step:500 Loss:1.051219 Acc:0.662200] Step status
[E:49/100] [S:500/100000] [Train Loss:1.093293 Acc:0.638800] [Val Loss:1.045672 Acc:0.658600 6586/10000 (66%)] [Best Epoch:49 Loss:1.045672 Acc:0.658600] [Best Step:500 Loss:1.051219 Acc:0.662200] [19.84s 997.8s] Epoch status
[E:50/100] [S:505/100000] [Train Loss:1.140463 Acc:0.638000 5/10 (50%)] [Val Loss:1.060365 Acc:0.654500 6545/10000 (65%)] [Best Epoch:49 Loss:1.045672 Acc:0.658600] [Best Step:500 Loss:1.051219 Acc:0.662200] Step status
[E:50/100] [S:510/100000] [Train Loss:1.044390 Acc:0.663000 10/10 (100%)] [Val Loss:1.106635 Acc:0.644200 6442/10000 (64%)] [Best Epoch:49 Loss:1.045672 Acc:0.658600] [Best Step:500 Loss:1.051219 Acc:0.662200] Step status
[E:50/100] [S:510/100000] [Train Loss:1.095504 Acc:0.643800] [Val Loss:1.072353 Acc:0.647600 6476/10000 (65%)] [Best Epoch:49 Loss:1.045672 Acc:0.658600] [Best Step:500 Loss:1.051219 Acc:0.662200] [19.85s 1017.6s] Epoch status
[E:51/100] [S:515/100000] [Train Loss:1.149685 Acc:0.613000 5/10 (50%)] [Val Loss:1.135782 Acc:0.633300 6333/10000 (63%)] [Best Epoch:49 Loss:1.045672 Acc:0.658600] [Best Step:500 Loss:1.051219 Acc:0.662200] Step status
[E:51/100] [S:520/100000] [Train Loss:1.077325 Acc:0.638000 10/10 (100%)] [Val Loss:1.137759 Acc:0.635800 6358/10000 (64%)] [Best Epoch:49 Loss:1.045672 Acc:0.658600] [Best Step:500 Loss:1.051219 Acc:0.662200] Step status
[E:51/100] [S:520/100000] [Train Loss:1.088232 Acc:0.639000] [Val Loss:1.094860 Acc:0.645500 6455/10000 (65%)] [Best Epoch:49 Loss:1.045672 Acc:0.658600] [Best Step:500 Loss:1.051219 Acc:0.662200] [19.91s 1037.5s] Epoch status
[E:52/100] [S:525/100000] [Train Loss:1.127501 Acc:0.626000 5/10 (50%)] [Val Loss:1.086300 Acc:0.640000 6400/10000 (64%)] [Best Epoch:49 Loss:1.045672 Acc:0.658600] [Best Step:500 Loss:1.051219 Acc:0.662200] Step status
[E:52/100] [S:530/100000] [Train Loss:1.065977 Acc:0.658000 10/10 (100%)] [Val Loss:1.122966 Acc:0.630200 6302/10000 (63%)] [Best Epoch:49 Loss:1.045672 Acc:0.658600] [Best Step:500 Loss:1.051219 Acc:0.662200] Step status
[E:52/100] [S:530/100000] [Train Loss:1.097328 Acc:0.640900] [Val Loss:1.117715 Acc:0.633000 6330/10000 (63%)] [Best Epoch:49 Loss:1.045672 Acc:0.658600] [Best Step:500 Loss:1.051219 Acc:0.662200] [19.86s 1057.4s] Epoch status
[E:53/100] [S:535/100000] [Train Loss:1.087094 Acc:0.647000 5/10 (50%)] [Val Loss:1.087964 Acc:0.632900 6329/10000 (63%)] [Best Epoch:49 Loss:1.045672 Acc:0.658600] [Best Step:500 Loss:1.051219 Acc:0.662200] Step status
[E:53/100] [S:540/100000] [Train Loss:1.050918 Acc:0.646000 10/10 (100%)] [Val Loss:1.064021 Acc:0.655500 6555/10000 (66%)] [Best Epoch:49 Loss:1.045672 Acc:0.658600] [Best Step:500 Loss:1.051219 Acc:0.662200] Step status
[E:53/100] [S:540/100000] [Train Loss:1.074915 Acc:0.646400] [Val Loss:1.057349 Acc:0.655300 6553/10000 (66%)] [Best Epoch:49 Loss:1.045672 Acc:0.658600] [Best Step:500 Loss:1.051219 Acc:0.662200] [19.91s 1077.3s] Epoch status
[E:54/100] [S:545/100000] [Train Loss:1.138784 Acc:0.627000 5/10 (50%)] [Val Loss:1.070358 Acc:0.645900 6459/10000 (65%)] [Best Epoch:49 Loss:1.045672 Acc:0.658600] [Best Step:500 Loss:1.051219 Acc:0.662200] Step status
[E:54/100] [S:550/100000] [Train Loss:1.155756 Acc:0.632000 10/10 (100%)] [Val Loss:1.137023 Acc:0.621300 6213/10000 (62%)] [Best Epoch:49 Loss:1.045672 Acc:0.658600] [Best Step:500 Loss:1.051219 Acc:0.662200] Step status
[E:54/100] [S:550/100000] [Train Loss:1.084555 Acc:0.642900] [Val Loss:1.136311 Acc:0.621600 6216/10000 (62%)] [Best Epoch:49 Loss:1.045672 Acc:0.658600] [Best Step:500 Loss:1.051219 Acc:0.662200] [19.92s 1097.2s] Epoch status
[E:55/100] [S:555/100000] [Train Loss:1.081485 Acc:0.633000 5/10 (50%)] [Val Loss:1.024534 Acc:0.662600 6626/10000 (66%)] [Best Epoch:55 Loss:1.024534 Acc:0.662600] [Best Step:555 Loss:1.024534 Acc:0.662600] Step status
[E:55/100] [S:560/100000] [Train Loss:1.083786 Acc:0.642000 10/10 (100%)] [Val Loss:1.085675 Acc:0.648200 6482/10000 (65%)] [Best Epoch:55 Loss:1.024534 Acc:0.662600] [Best Step:555 Loss:1.024534 Acc:0.662600] Step status
[E:55/100] [S:560/100000] [Train Loss:1.079649 Acc:0.646200] [Val Loss:1.094631 Acc:0.647700 6477/10000 (65%)] [Best Epoch:55 Loss:1.024534 Acc:0.662600] [Best Step:555 Loss:1.024534 Acc:0.662600] [19.92s 1117.1s] Epoch status
[E:56/100] [S:565/100000] [Train Loss:1.067666 Acc:0.651000 5/10 (50%)] [Val Loss:1.081291 Acc:0.646100 6461/10000 (65%)] [Best Epoch:55 Loss:1.024534 Acc:0.662600] [Best Step:555 Loss:1.024534 Acc:0.662600] Step status
[E:56/100] [S:570/100000] [Train Loss:1.088738 Acc:0.644000 10/10 (100%)] [Val Loss:1.052702 Acc:0.644800 6448/10000 (64%)] [Best Epoch:55 Loss:1.024534 Acc:0.662600] [Best Step:555 Loss:1.024534 Acc:0.662600] Step status
[E:56/100] [S:570/100000] [Train Loss:1.095016 Acc:0.637200] [Val Loss:1.071500 Acc:0.634100 6341/10000 (63%)] [Best Epoch:55 Loss:1.024534 Acc:0.662600] [Best Step:555 Loss:1.024534 Acc:0.662600] [19.79s 1136.9s] Epoch status
[E:57/100] [S:575/100000] [Train Loss:1.029626 Acc:0.657000 5/10 (50%)] [Val Loss:1.076980 Acc:0.643200 6432/10000 (64%)] [Best Epoch:55 Loss:1.024534 Acc:0.662600] [Best Step:555 Loss:1.024534 Acc:0.662600] Step status
[E:57/100] [S:580/100000] [Train Loss:1.092300 Acc:0.646000 10/10 (100%)] [Val Loss:1.067589 Acc:0.644800 6448/10000 (64%)] [Best Epoch:55 Loss:1.024534 Acc:0.662600] [Best Step:555 Loss:1.024534 Acc:0.662600] Step status
[E:57/100] [S:580/100000] [Train Loss:1.093117 Acc:0.641300] [Val Loss:1.073517 Acc:0.639200 6392/10000 (64%)] [Best Epoch:55 Loss:1.024534 Acc:0.662600] [Best Step:555 Loss:1.024534 Acc:0.662600] [20.52s 1157.4s] Epoch status
[E:58/100] [S:585/100000] [Train Loss:1.049593 Acc:0.635000 5/10 (50%)] [Val Loss:1.145984 Acc:0.628800 6288/10000 (63%)] [Best Epoch:55 Loss:1.024534 Acc:0.662600] [Best Step:555 Loss:1.024534 Acc:0.662600] Step status
[E:58/100] [S:590/100000] [Train Loss:1.075136 Acc:0.660000 10/10 (100%)] [Val Loss:1.111776 Acc:0.636600 6366/10000 (64%)] [Best Epoch:55 Loss:1.024534 Acc:0.662600] [Best Step:555 Loss:1.024534 Acc:0.662600] Step status
[E:58/100] [S:590/100000] [Train Loss:1.085887 Acc:0.649100] [Val Loss:1.108645 Acc:0.643800 6438/10000 (64%)] [Best Epoch:55 Loss:1.024534 Acc:0.662600] [Best Step:555 Loss:1.024534 Acc:0.662600] [20.11s 1177.5s] Epoch status
[E:59/100] [S:595/100000] [Train Loss:1.116177 Acc:0.632000 5/10 (50%)] [Val Loss:1.064075 Acc:0.655500 6555/10000 (66%)] [Best Epoch:55 Loss:1.024534 Acc:0.662600] [Best Step:555 Loss:1.024534 Acc:0.662600] Step status
[E:59/100] [S:600/100000] [Train Loss:1.113566 Acc:0.638000 10/10 (100%)] [Val Loss:1.086235 Acc:0.634800 6348/10000 (63%)] [Best Epoch:55 Loss:1.024534 Acc:0.662600] [Best Step:555 Loss:1.024534 Acc:0.662600] Step status
[E:59/100] [S:600/100000] [Train Loss:1.115535 Acc:0.635300] [Val Loss:1.092341 Acc:0.629400 6294/10000 (63%)] [Best Epoch:55 Loss:1.024534 Acc:0.662600] [Best Step:555 Loss:1.024534 Acc:0.662600] [22.62s 1200.2s] Epoch status
[E:60/100] [S:605/100000] [Train Loss:1.123111 Acc:0.647000 5/10 (50%)] [Val Loss:1.084873 Acc:0.636800 6368/10000 (64%)] [Best Epoch:55 Loss:1.024534 Acc:0.662600] [Best Step:555 Loss:1.024534 Acc:0.662600] Step status
[E:60/100] [S:610/100000] [Train Loss:1.101901 Acc:0.631000 10/10 (100%)] [Val Loss:1.097978 Acc:0.644300 6443/10000 (64%)] [Best Epoch:55 Loss:1.024534 Acc:0.662600] [Best Step:555 Loss:1.024534 Acc:0.662600] Step status
[E:60/100] [S:610/100000] [Train Loss:1.082605 Acc:0.643100] [Val Loss:1.081505 Acc:0.653200 6532/10000 (65%)] [Best Epoch:55 Loss:1.024534 Acc:0.662600] [Best Step:555 Loss:1.024534 Acc:0.662600] [20.09s 1220.3s] Epoch status
[E:61/100] [S:615/100000] [Train Loss:1.117607 Acc:0.641000 5/10 (50%)] [Val Loss:1.093737 Acc:0.640200 6402/10000 (64%)] [Best Epoch:55 Loss:1.024534 Acc:0.662600] [Best Step:555 Loss:1.024534 Acc:0.662600] Step status
[E:61/100] [S:620/100000] [Train Loss:1.094660 Acc:0.636000 10/10 (100%)] [Val Loss:1.097514 Acc:0.642800 6428/10000 (64%)] [Best Epoch:55 Loss:1.024534 Acc:0.662600] [Best Step:555 Loss:1.024534 Acc:0.662600] Step status
[E:61/100] [S:620/100000] [Train Loss:1.104322 Acc:0.637600] [Val Loss:1.076359 Acc:0.650200 6502/10000 (65%)] [Best Epoch:55 Loss:1.024534 Acc:0.662600] [Best Step:555 Loss:1.024534 Acc:0.662600] [20.08s 1240.3s] Epoch status
[E:62/100] [S:625/100000] [Train Loss:1.112613 Acc:0.633000 5/10 (50%)] [Val Loss:1.094935 Acc:0.642300 6423/10000 (64%)] [Best Epoch:55 Loss:1.024534 Acc:0.662600] [Best Step:555 Loss:1.024534 Acc:0.662600] Step status
[E:62/100] [S:630/100000] [Train Loss:1.017960 Acc:0.664000 10/10 (100%)] [Val Loss:1.061892 Acc:0.649300 6493/10000 (65%)] [Best Epoch:55 Loss:1.024534 Acc:0.662600] [Best Step:555 Loss:1.024534 Acc:0.662600] Step status
[E:62/100] [S:630/100000] [Train Loss:1.087227 Acc:0.636500] [Val Loss:1.037155 Acc:0.658900 6589/10000 (66%)] [Best Epoch:55 Loss:1.024534 Acc:0.662600] [Best Step:555 Loss:1.024534 Acc:0.662600] [20.14s 1260.5s] Epoch status
[E:63/100] [S:635/100000] [Train Loss:1.038473 Acc:0.675000 5/10 (50%)] [Val Loss:1.059394 Acc:0.654400 6544/10000 (65%)] [Best Epoch:55 Loss:1.024534 Acc:0.662600] [Best Step:555 Loss:1.024534 Acc:0.662600] Step status
[E:63/100] [S:640/100000] [Train Loss:1.089411 Acc:0.641000 10/10 (100%)] [Val Loss:1.102378 Acc:0.638500 6385/10000 (64%)] [Best Epoch:55 Loss:1.024534 Acc:0.662600] [Best Step:555 Loss:1.024534 Acc:0.662600] Step status
[E:63/100] [S:640/100000] [Train Loss:1.088521 Acc:0.650700] [Val Loss:1.115940 Acc:0.633800 6338/10000 (63%)] [Best Epoch:55 Loss:1.024534 Acc:0.662600] [Best Step:555 Loss:1.024534 Acc:0.662600] [19.81s 1280.3s] Epoch status
[E:64/100] [S:645/100000] [Train Loss:1.117524 Acc:0.622000 5/10 (50%)] [Val Loss:1.086889 Acc:0.645500 6455/10000 (65%)] [Best Epoch:55 Loss:1.024534 Acc:0.662600] [Best Step:555 Loss:1.024534 Acc:0.662600] Step status
[E:64/100] [S:650/100000] [Train Loss:1.139455 Acc:0.630000 10/10 (100%)] [Val Loss:1.077764 Acc:0.644100 6441/10000 (64%)] [Best Epoch:55 Loss:1.024534 Acc:0.662600] [Best Step:555 Loss:1.024534 Acc:0.662600] Step status
[E:64/100] [S:650/100000] [Train Loss:1.091119 Acc:0.643600] [Val Loss:1.078986 Acc:0.650200 6502/10000 (65%)] [Best Epoch:55 Loss:1.024534 Acc:0.662600] [Best Step:555 Loss:1.024534 Acc:0.662600] [19.86s 1300.1s] Epoch status
[E:65/100] [S:655/100000] [Train Loss:1.135261 Acc:0.633000 5/10 (50%)] [Val Loss:1.109965 Acc:0.645600 6456/10000 (65%)] [Best Epoch:55 Loss:1.024534 Acc:0.662600] [Best Step:555 Loss:1.024534 Acc:0.662600] Step status
[E:65/100] [S:660/100000] [Train Loss:1.104078 Acc:0.632000 10/10 (100%)] [Val Loss:1.067992 Acc:0.644500 6445/10000 (64%)] [Best Epoch:55 Loss:1.024534 Acc:0.662600] [Best Step:555 Loss:1.024534 Acc:0.662600] Step status
Early Stop With step: 660
[E:65/100] [S:660/100000] [Train Loss:1.092243 Acc:0.646400] [Val Loss:1.067515 Acc:0.640500 6405/10000 (64%)] [Best Epoch:55 Loss:1.024534 Acc:0.662600] [Best Step:555 Loss:1.024534 Acc:0.662600] [19.72s 1319.9s] Epoch status
> Dovesky
[[-11.15456 -12.608416 -4.3386755 -9.514153 -10.997666
-5.879319 -5.6799726 -1.4792681 -9.864466 -2.6395202
-10.024909 -3.0944839 -0.46927977 -9.845455 -12.673562
-5.267896 -14.660818 -5.2269883 ]]
['Korean', 'Chinese', 'Polish', 'Greek', 'Spanish', 'Dutch', 'Scottish', 'Czech', 'Arabic', 'Irish', 'Japanese', 'English', 'Russian', 'Italian', 'Portuguese', 'French', 'Vietnamese', 'German']
(-0.47) Russian
(-1.48) Czech
(-2.64) Irish
> Jackson
[[-11.136252 -13.164243 -9.152903 -5.671031 -9.121303
-6.3863363 -0.11942005 -6.688505 -11.711407 -8.398445
-11.543731 -2.4797602 -5.3600435 -11.202702 -13.145058
-4.0789714 -12.38205 -8.085201 ]]
['Korean', 'Chinese', 'Polish', 'Greek', 'Spanish', 'Dutch', 'Scottish', 'Czech', 'Arabic', 'Irish', 'Japanese', 'English', 'Russian', 'Italian', 'Portuguese', 'French', 'Vietnamese', 'German']
(-0.12) Scottish
(-2.48) English
(-4.08) French
> Satoshi
[[-12.311507 -7.3708954 -3.0775084 -3.1271336 -5.84587
-5.185088 -5.1088986 -5.3196754 -2.5652363 -6.6197653
-0.35248804 -6.264645 -5.089513 -2.9695826 -3.3472583
-5.7556515 -9.279834 -4.5078664 ]]
['Korean', 'Chinese', 'Polish', 'Greek', 'Spanish', 'Dutch', 'Scottish', 'Czech', 'Arabic', 'Irish', 'Japanese', 'English', 'Russian', 'Italian', 'Portuguese', 'French', 'Vietnamese', 'German']
|
||||
(-0.35) Japanese
|
||||
(-2.57) Arabic
|
||||
(-2.97) Italian
|
||||
|
||||
> Foong
|
||||
[[-2.9198012 -1.0796714 -4.7780027 -7.685022 -5.1835794 -3.2180996
|
||||
-4.860241 -2.9677668 -6.3561764 -2.3206499 -4.878993 -2.954226
|
||||
-4.6359863 -3.9107776 -7.0906854 -4.5995116 -1.666034 -2.2692003]]
|
||||
['Korean', 'Chinese', 'Polish', 'Greek', 'Spanish', 'Dutch', 'Scottish', 'Czech', 'Arabic', 'Irish', 'Japanese', 'English', 'Russian', 'Italian', 'Portuguese', 'French', 'Vietnamese', 'German']
|
||||
(-1.08) Chinese
|
||||
(-1.67) Vietnamese
|
||||
(-2.27) German
|
||||
|
||||
> Tsai
|
||||
[[-7.5000424 -1.0480399 -4.730262 -5.5038843 -6.9739256 -7.76619
|
||||
-5.7381983 -6.0231447 -0.9425225 -9.867343 -3.251836 -7.269185
|
||||
-7.3243184 -2.6890192 -7.4435186 -7.567208 -2.0430076 -7.6369867]]
|
||||
['Korean', 'Chinese', 'Polish', 'Greek', 'Spanish', 'Dutch', 'Scottish', 'Czech', 'Arabic', 'Irish', 'Japanese', 'English', 'Russian', 'Italian', 'Portuguese', 'French', 'Vietnamese', 'German']
|
||||
(-0.94) Arabic
|
||||
(-1.05) Chinese
|
||||
(-2.04) Vietnamese
```
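Each per-name block above prints a 1x18 row of log-softmax-style scores followed by its three largest entries. A minimal sketch of that post-processing step, assuming the scores sit in a NumPy array and using the category list printed with each prediction:

```python
import numpy as np

categories = ['Korean', 'Chinese', 'Polish', 'Greek', 'Spanish', 'Dutch',
              'Scottish', 'Czech', 'Arabic', 'Irish', 'Japanese', 'English',
              'Russian', 'Italian', 'Portuguese', 'French', 'Vietnamese', 'German']

def top_k(log_probs, k=3):
    # Flatten the 1x18 row and take the indices of the k largest scores
    row = np.asarray(log_probs).reshape(-1)
    best = np.argsort(row)[::-1][:k]
    return [(round(float(row[i]), 2), categories[i]) for i in best]

# For the Jackson row above this yields
# [(-0.12, 'Scottish'), (-2.48, 'English'), (-4.08, 'French')].
```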
@ -0,0 +1,198 @@
from GetPartnerCovid import CastToTrainWithOrderCovid,GetOrderLineDetailCovid
from GetPartner import GetOrderLineDetail,CastToTrainWithOrder

from ReorderByGroupCovid import ReorderByGroupCovid
from ReorderByGroup import ReorderByGroup
from GetDifferPKGCovid import GetDifferCovid
from GetDifferPKG import GetDiffer

from gevent import pywsgi
from flask import Flask,render_template,request
from flask_cors import CORS

import numpy as np
import copy
import json
import sys

sys.path.append('../')
# from RNNModelPredict import aRNNModel

host='127.0.0.1'
port=6666
app=Flask(__name__)
CORS(app, supports_credentials=True)


@app.route('/')
def index():
    return "hello world!"


# The original input, as a list of clauses
oriSenList=[]

# Colors derived from importance
# colorCount=[]
modelType=0
differCount=[]

MyReorder=None

RNNModel=None


@app.route('/oriInputSigni',methods=['POST'])  # first argument: route; second: request method
# Receive the raw input, run the prediction, and compute the importance scores,
# i.e. all interpretability data related to this input.
def oriInputSigni():
    global MyReorder,oriSenList,differCount,myinput  # colorCount
    MyReorder=None
    MyDiffer=None

    myinput = request.get_json().get('input')  # data sent from the front end

    # The paper dropped word-level reordering and hidden-unit-based judgement,
    # so the two assignments below are placeholders.
    # modelType=request.get_json().get('modelType')  # clause-level vs. word-level analysis
    # judgeType=request.get_json().get('judgeType')  # judge importance from the output vs. from hidden-state change
    modelType='clause'
    judgeType='output'

    # For sentiment analysis, distinguish LSTM from GRU
    rnnType = request.get_json().get('rnnType')
    # Sentiment-analysis task vs. COVID-forecast task
    taskType=request.get_json().get('taskType')

    # Pick the reorder class that matches the task
    if MyReorder:
        print('del MyReorder')
        del MyReorder

    # oriSenList=GetSenList(myinput,modelType)
    if taskType=='Regression':
        MyReorder=ReorderByGroupCovid(myinput,RNNModel)
    else:
        MyReorder=ReorderByGroup(myinput,RNNModel)

    # Convert the current input into a list: it is returned to the front end
    # and also fed to the model for prediction.
    oriSenList=MyReorder.senList
    res=MyReorder.oriRes

    # MyReorder holds the global and the local interpretability data;
    # the local algorithm extracts orderLine1, the few critical clauses.
    globalDataZip,localDataZip,orderLineZip = MyReorder.GetImportanceByColor()

    localDifferCount=localDataZip['senDifferCount']
    # print('localDifferCount',localDifferCount)
    globalDifferCount=globalDataZip['differCount']

    # maxDiffer=0
    # for num in differCount:
    #     if abs(num)>maxDiffer:
    #         maxDiffer=abs(num)

    sentenDetail=[]  # MyReorder.GetDeatail()

    emotion='pos'

    if taskType=='Classification':
        # res=res.tolist()
        if(res[0]>res[1]):
            emotion='pos'
        else:
            emotion='neg'

    # Pick the MyDiffer class that matches the task
    if MyDiffer:  # the original tested MyReorder here, which was always the wrong object
        print('del MyDiffer')
        del MyDiffer

    if taskType=='Regression':
        MyDiffer=GetDifferCovid(myinput,RNNModel)
    else:
        MyDiffer=GetDiffer(myinput,RNNModel)

    # MyDiffer runs a genetic algorithm to find the most influential clause
    # orderings of one input, and derives orderLine2 from the most repeated clauses.
    reorderRes,reorderInd,orderLine1,orderLine2=MyDiffer.GetDiffOrder()

    # The two calls below are not wrapped yet; they map the orderLine data onto the data set.
    allDataFile=[]  # only filled in the classification branch; without this default
                    # the regression branch would hit a NameError when building out_data
    if taskType=='Regression':
        trainDataZip={}
        # trainDataZip = CastToTrainWithOrderCovid(orderLine1,orderLine2,emotion,globalDifferCount)
        orderLineInfoZip = GetOrderLineDetailCovid(orderLine1,orderLine2,oriSenList,globalDifferCount,localDifferCount,RNNModel)
    else:
        trainDataZip={}
        trainDataZip,allDataFile = CastToTrainWithOrder(orderLine1,orderLine2,emotion,globalDifferCount)
        orderLineInfoZip = GetOrderLineDetail(orderLine1,orderLine2,oriSenList,globalDifferCount,localDifferCount,RNNModel)

    print('trainDataZip',trainDataZip)

    out_data={"wordList":oriSenList,"res":res,'sentenDetail':sentenDetail,'globalDataZip':globalDataZip,'localDataZip':localDataZip,'orderLineZip':orderLineZip,"reorderRes":reorderRes,"reorderInd":reorderInd,'trainDataZip':trainDataZip,'orderLineInfoZip':orderLineInfoZip,'allDataFile':allDataFile}

    # print(out_data)

    return json.dumps(out_data)


@app.route('/getPredict',methods=['POST'])  # first argument: route; second: request method
def getPredict():
    # Predict on the user-reordered input sequence sent from the front end
    global MyReorder
    nowList = request.get_json().get('nowList')  # data sent from the front end
    # rnnType = request.get_json().get('rnnType')
    # taskType=request.get_json().get('taskType')
    # Return the prediction for the current task

    prediction=RNNModel.Predict(' '.join(nowList))

    res=prediction

    out_data={"res":res}
    print('res!!',res)

    return json.dumps(out_data)


def ToRun(rnnMo):
    global RNNModel  # without this, the assignment below would only bind a local name
    RNNModel=rnnMo
    app.run(host="0.0.0.0",port=5000)


# if __name__ == "__main__":
#     app.run(host="0.0.0.0",port=5000)
#     # Proxy Flask with pywsgi for better stability
#     server = pywsgi.WSGIServer(('0.0.0.0', 5000), app)
#     server.serve_forever()
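For reference, a minimal client-side sketch (an assumed test script, not part of the commit) showing how the `/oriInputSigni` handler above expects to be called once the server has been started through `ToRun`; the payload keys `input`, `rnnType` and `taskType` are the ones the handler reads:

```python
import requests

payload = {
    "input": "This movie was slow. The acting saved it.",  # raw review text
    "rnnType": "GRU",             # LSTM vs. GRU, only used by the sentiment task
    "taskType": "Classification"  # or "Regression" for the COVID-forecast task
}
resp = requests.post("http://127.0.0.1:5000/oriInputSigni", json=payload)
data = resp.json()
print(data["res"])               # model prediction for the original clause order
print(data["orderLineInfoZip"])  # per-orderLine importance details
```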
@ -0,0 +1,618 @@
import numpy as np
import tensorflow as tf
import keras
import re
import copy
import math
from random import shuffle
from random import uniform
from random import randint
# wordsList=np.load('./VectorList/wordsList.npy')
# wordsList = wordsList.tolist() #Originally loaded as numpy array
# wordsList = [word.decode('UTF-8') for word in wordsList] #Encode words as UTF-8


# Helper functions

# Split a whole review into clauses; returns a list
def InputToSenList(senten,model):
    mark=' mark! '
    # Regular expression that decides where to split
    stripSpecialChars=re.compile("[^A-Za-z0-9 ]+")
    # Lower-case everything
    senten=senten.lower().replace('<br />','')
    #print(senten)
    # Replace every punctuation mark with the marker token
    subSenList=[]

    if model=='clause':
        myinput=re.sub(stripSpecialChars,mark,senten)
        # wordVec holds the tokens, i.e. the words
        wordVec=myinput.split()

        # markLoc records the positions of 'mark!', i.e. of the punctuation,
        # which serve as the clause boundaries
        markLoc=[]
        markLoc.append(0)

        shiftNum=0
        for i in range(len(wordVec)):
            if wordVec[i-shiftNum]=='mark!':
                markLoc.append(i-shiftNum)
                wordVec.pop(i-shiftNum)
                shiftNum+=1

        # Cut at the punctuation positions and collect each clause
        for i in range(len(markLoc)-1):
            subSenList.append(" ".join(wordVec[markLoc[i]:markLoc[i+1]]))
    else:
        myinput=re.sub(stripSpecialChars,' ',senten)
        # subSenList holds the tokens, i.e. the words
        subSenList=myinput.split()

    return subSenList
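For illustration, the expected behaviour of the splitter above on a toy review (an assumed example, traced by hand through the `mark!` replacement logic):

```python
>>> InputToSenList("This is bad. Really bad.", 'clause')
['this is bad', 'really bad']
>>> InputToSenList("This is bad. Really bad.", 'word')
['this', 'is', 'bad', 'really', 'bad']
```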
# Turn a token list into the id-encoded comment matrix the model consumes
def ListToVecComment(tempSenList):
    global wordsList

    # comment holds the review with every word replaced by its dictionary index
    comment=np.zeros([batchSize,maxSeqLength])

    # NOTE: tempSenList should be an iterable of tokens; if a plain string is
    # passed, ' '.join spaces out its characters instead of joining words.
    fullSent=' '.join(tempSenList)

    counter=0

    for word in fullSent.split():
        try:
            comment[0][counter]=wordsList.index(word)
        except Exception:
            comment[0][counter]=399999  # id reserved for unknown words
        counter+=1
        if counter==250:
            break

    return comment


def CountWordNum(strs):
    count=1
    for word in strs:
        if word==' ':
            count+=1

    return count


def GetSenList(myinput,model='clause'):
    myinput+='. '
    senList=InputToSenList(myinput,model)  # the model argument was missing in the original call

    if(model=='word'):
        fullSent=' '.join(senList)
        senList=fullSent.split()

    return senList


maxSeqLength=250

batchSize = 24
lstmUnits = 64
numClasses = 2
numDimensions = 50 #Dimensions for each word vector
iterations = 1000 #100000
learnRate=0


class PredictRNN():
    def __init__(self,rnnType):
        tf.reset_default_graph()
        self.inputData,self.value,self.weight,bias=self.DefVar(rnnType)
        self.predicr=self.DefPreFun(self.value,self.weight,bias)

        if(rnnType=='GRU'):
            self.savePath='./modelsMoreGRU/pretrained_gru.ckpt-130000'
        elif(rnnType=='vanilla'):
            self.savePath='./modelsMoreVanilla/pretrained_gru.ckpt-500000'
        else:
            self.savePath='./modelsMoreMid/pretrained_lstm.ckpt-290000'

    # Define the graph variables
    def DefVar(self,rnnType):

        wordVectors = np.load('./VectorList/wordVectors.npy')

        keras.backend.clear_session()
        tf.reset_default_graph()
        # batchSize reviews of maxSeqLength tokens; every token is still a word id here
        inputData = tf.placeholder(tf.int32, [batchSize, maxSeqLength])

        data = tf.Variable(tf.zeros([batchSize, maxSeqLength, numDimensions]),dtype = tf.float32)
        # Replace each word id with its embedding vector
        data = tf.nn.embedding_lookup(wordVectors,inputData)

        # lstmUnits sets the size of each hidden output: 64-dimensional outputs
        # on 50-dimensional inputs, so the recurrent weights act on 50+64=114 dimensions
        lstmCell = tf.contrib.rnn.BasicLSTMCell(lstmUnits)
        if(rnnType=='GRU'):
            lstmCell = tf.contrib.rnn.GRUCell(lstmUnits)
        elif(rnnType=='vanilla'):
            lstmCell = tf.contrib.rnn.BasicRNNCell(lstmUnits)

        #lstmCell = tf.contrib.rnn.DropoutWrapper(cell=lstmCell, output_keep_prob=0.75)
        # value is batchSize x maxSeqLength x lstmUnits: one 64-dimensional
        # output per time step for each review in the batch
        initial_state = lstmCell.zero_state(batchSize, tf.float32)

        value, _ = tf.nn.dynamic_rnn(lstmCell,data, initial_state=initial_state,dtype=tf.float32)

        # For an LSTM, _ is a pair of 24x64 tensors: the final cell state
        # and the final hidden state of the last step

        # A random 64x2 projection matrix
        weight = tf.Variable(tf.truncated_normal([lstmUnits,numClasses]))

        # A two-element bias initialised to [0.1, 0.1]
        bias = tf.Variable(tf.constant(0.1, shape=[numClasses]))

        # (transposing would give maxSeqLength x batchSize x lstmUnits: steps, samples, outputs)
        #value = tf.transpose(value,[1,0,2])

        # ori=tf.gather(value,0)

        return inputData,value,weight,bias # ,ori


    def DefPreFun(self,value,weight,bias):
        # Take the output of the last time step: 24x64
        value = tf.transpose(value,[1,0,2])
        last=tf.gather(value,int(value.get_shape()[0])-1)

        # Project the 64-dim vector onto the two classes and add the bias
        prediction= (tf.matmul(last,weight)+bias)

        #prediction=tf.argmax(prediction,1)

        output = tf.nn.softmax(prediction)
        return output


    def Predict(self,tempSenList):
        comment=ListToVecComment(tempSenList)

        with tf.Session() as session:
            saver=tf.train.Saver()
            saver.restore(session,self.savePath)
            res=session.run(self.predicr, {self.inputData: comment})

        return res[0]

    def GetRes(self,comment):
        with tf.Session() as session:
            saver=tf.train.Saver()
            saver.restore(session,self.savePath)
            res=session.run(self.predicr, {self.inputData: comment})

        return res
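A minimal usage sketch for `PredictRNN` (an assumed snippet; it presumes the checkpoint directories and `./VectorList/wordVectors.npy` from the repo are in place). Note the design cost: `Predict` opens a session and restores the checkpoint on every call, which is what makes the genetic search below expensive:

```python
model = PredictRNN('GRU')  # builds the graph and picks the GRU checkpoint
probs = model.Predict("great film really enjoyed it".split())  # pass a token list, not a raw string
print(probs)  # softmax over the two classes; index 0 is treated as positive elsewhere in the repo
```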
class GetDiffer():

    def __init__(self,myinput,RNNModel):
        # The input split into clauses
        self.senList=InputToSenList(myinput,'clause')
        # The input split into words
        self.wordList=InputToSenList(myinput,'word')

        # Length of the resulting list
        self.sentenSize=len(self.senList)

        # The prediction model; the user's model must expose a Predict method
        # self.PreRNN=PredictRNN()
        self.PreRNN=RNNModel
        # Prediction for the original order
        self.oriRes=self.PreRNN.Predict(' '.join(self.senList))
        self.iterations=100
        # Suggested number of iterations: number of clauses * number of whole passes


    # Search for the orderings with the largest prediction deviation
    def GetDiffOrder(self):

        indexOrder1= np.arange(start=0,stop=self.sentenSize,dtype=int)

        indexOrder1=indexOrder1.reshape(1,-1)
        indexOrder1=np.repeat(indexOrder1,batchSize,axis=0)
        # batchSize x clause count
        # indexOrder2=copy.deepcopy(indexOrder1)

        fitness=np.zeros(batchSize)

        allRes=None
        allIndexOrder=None

        iterations=15
        for it in range(iterations):  # renamed from i: the original shadowed this index below
            indexOrder1 = self.Variate(indexOrder1,it)

            # if it!=(iterations-1):

            indexOrder1=self.OX(indexOrder1)

            comment1=self.IndexToInput(indexOrder1)

            res1=[]

            for c in range(len(comment1)):
                res1.append(self.PreRNN.Predict(comment1[c]))

            res1=np.array(res1)

            allRes = np.array(res1)
            allIndexOrder = np.array(indexOrder1)

            # Fitness function: mean absolute deviation of the prediction
            # from the original result
            for j in range(batchSize):
                # todo: factor out a function that computes the deviations in allRes
                calDif=0
                oneRes=allRes[j]

                for k in range(len(oneRes)):
                    calDif+=abs(oneRes[k]-self.oriRes[k])

                calDif/=len(oneRes)

                fitness[j] = calDif #(res[j][0]-oriRes[0])*30
                #fitness[j] -= InverNum(indexOrder[j])

            # Roulette-wheel selection over the fitness values
            totalRan=0

            for num in fitness:
                totalRan+=num

            for j in range(batchSize):
                ranNum=uniform(0,totalRan)
                ranCount=0
                for k in range(len(fitness)):
                    ranCount+=fitness[k]
                    if(ranCount>=ranNum):
                        indexOrder1[j]=allIndexOrder[k]
                        break

            # else:
            #     indexOrder=fatherIndexOrder

        comment1=self.IndexToInput(indexOrder1)

        res1=[]

        for c in range(len(comment1)):
            res1.append(self.PreRNN.Predict(comment1[c]))

        res1=np.array(res1)

        allRes = np.array(res1)
        allIndexOrder = np.array(indexOrder1)

        for j in range(batchSize):
            # todo: factor out a function that computes the deviations in allRes
            calDif=0
            oneRes=allRes[j]

            for k in range(len(oneRes)):
                calDif+=abs(oneRes[k]-self.oriRes[k])

            calDif/=len(oneRes)

            fitness[j] = calDif

        # Bubble-sort the population by fitness, carrying results and orderings along
        for j in range(1, batchSize):
            for k in range(0, batchSize - j ):
                if fitness[k] > fitness[k+1]:
                    fitness[k],fitness[k+1] = fitness[k+1],fitness[k]
                    allRes[[k,k+1], :] = allRes[[k+1,k], :]
                    allIndexOrder[[k,k+1], :] = allIndexOrder[[k+1,k], :]

                    # allRes[k], allRes[k+1] = allRes[k+1], allRes[k]
                    # allIndexOrder[k], allIndexOrder[k+1] = allIndexOrder[k+1], allIndexOrder[k]

        reorderRes=[]
        reorderInd=[]

        # Keep only distinct results
        repeat=[]
        count=0
        for i in range(len(allRes)):
            if(fitness[i] not in repeat):
                repeat.append(fitness[i])
                reorderRes.append(allRes[i].tolist())
                reorderInd.append(allIndexOrder[i].tolist())
                count+=1
                if count==6:
                    break

        orderLine1=self.GetGlobalOrderLine(indexOrder1.tolist())
        orderLine2=[]
        # orderLine2=self.GetGlobalOrderLine(indexOrder2.tolist())
        orderLine=orderLine1

        print('orderLine',orderLine)

        return reorderRes,reorderInd,orderLine1,orderLine2


    # Mutation operator
    def Variate(self,indexOrder,iterations):

        theOrder=indexOrder
        for i in range(batchSize):
            if iterations==0:
                randChoice=randint(1,20)
            else:
                randChoice=randint(1,5)

            if randChoice>=5:
                # newOrder = np.zeros(len(theOrder[i]))
                smaLoc = randint(0,self.sentenSize-1)
                bigLoc = randint(0,self.sentenSize-1)

                if smaLoc>bigLoc:
                    smaLoc,bigLoc = bigLoc,smaLoc

                # Rotate the slice [smaLoc, bigLoc] one position to the left
                temp=theOrder[i][smaLoc]
                theOrder[i][smaLoc:bigLoc]=theOrder[i][smaLoc+1:bigLoc+1]
                theOrder[i][bigLoc]=temp

        return theOrder


    # Order crossover (OX) between pairs of parents
    def OX(self,fatherIndexOrder):
        childIndexOrder=np.zeros([batchSize,self.sentenSize])

        for i in range(int(batchSize/2)):
            # father1Loc=i*2
            # father2Loc=i*2+1
            father1Loc=i
            father2Loc=int(i+batchSize/2)
            randChoice=randint(1,20)

            if randChoice==1:
                # Occasionally copy the parents through unchanged
                childIndexOrder[father1Loc] = fatherIndexOrder[father1Loc]
                childIndexOrder[father2Loc] = fatherIndexOrder[father2Loc]
            else:
                father1 = fatherIndexOrder[father1Loc]
                father2 = fatherIndexOrder[father2Loc]

                smaLoc = randint(0,self.sentenSize-1)
                bigLoc = randint(0,self.sentenSize-1)

                if smaLoc>bigLoc:
                    smaLoc,bigLoc = bigLoc,smaLoc

                # Copy a slice of parent 1, then fill the rest in parent 2's order
                childIndexOrder[father1Loc][smaLoc:bigLoc+1]=father1[smaLoc:bigLoc+1]

                childLoc=0

                for num in father2:
                    if childLoc == smaLoc:
                        childLoc = bigLoc+1
                    if childLoc >= self.sentenSize:
                        break

                    if num in father1[smaLoc:bigLoc+1]:
                        continue

                    childIndexOrder[father1Loc][childLoc]=num

                    childLoc+=1

                # Same crossover with the parent roles swapped
                father1 = fatherIndexOrder[father2Loc]
                father2 = fatherIndexOrder[father1Loc]

                smaLoc = randint(0,self.sentenSize-1)
                bigLoc = randint(0,self.sentenSize-1)

                if smaLoc>bigLoc:
                    smaLoc,bigLoc = bigLoc,smaLoc

                childIndexOrder[father2Loc][smaLoc:bigLoc+1]=father1[smaLoc:bigLoc+1]

                childLoc=0

                for num in father2:
                    if childLoc == smaLoc:
                        childLoc = bigLoc+1
                    if childLoc >= self.sentenSize:
                        break

                    if num in father1[smaLoc:bigLoc+1]:
                        continue

                    childIndexOrder[father2Loc][childLoc]=num
                    childLoc+=1

        return childIndexOrder


    # Turn index orderings back into text inputs
    def IndexToInput(self,indexOrder):
        # comment collects the reordered reviews as plain text
        comment=[]

        for i in range(batchSize):
            allsub=[]
            for index in indexOrder[i]:
                allsub.append(self.senList[int(index)])
            # TOFix!!!!!!!
            fullSent=' '.join(allsub)

            comment.append(fullSent)

        return comment


    # Collect clause subsequences that repeat across the evolved orderings
    def GetGlobalOrderLine(self,indexOrder):
        orderLine=[]
        # print('indx',indexOrder)
        lenth=len(indexOrder[0])

        threHold=9

        for sen in range(batchSize-threHold):  # scan the candidate orderings
            for i in range(lenth):  # for a chosen ordering, scan every subsequence
                for j in range(i+2,lenth+1):
                    count=0
                    target=indexOrder[sen][i:j]
                    # print('tar',target)
                    for k in range(1,batchSize):
                        searchOrder=indexOrder[k]
                        for l in range(lenth+i-j):
                            # if indexOrder[i:j] == searchOrder[l:l-i+j]:
                            if (target == searchOrder[l:l-i+j]):
                                # print('searchOrd',searchOrder[l:l-i+j])
                                count+=1
                                break

                    # Keep the subsequence if it repeats often enough
                    # (shorter subsequences need more repetitions)
                    if count>3 and count>(15-len(target)*3):
                        isRepeat=False

                        m=0  # renamed from i: the original shadowed the outer loop index
                        while m < len(orderLine):
                            line = orderLine[m]
                            d = [False for c in target if c not in line]
                            if not d:
                                isRepeat=True
                                break

                            d = [False for c in line if c not in target]
                            if not d:
                                print('remove!')
                                m-=1
                                orderLine.remove(line)

                            m+=1

                        if not isRepeat:
                            orderLine.append(target)

                    else:
                        break

        return orderLine
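To make the `OX` step concrete, a standalone sketch of order crossover on two toy permutations (hypothetical parents and cut points, matching the copy-slice-then-fill logic above):

```python
# Parent 1 donates the slice at positions 2..4; the remaining positions are
# filled with parent 2's genes in parent 2's order, skipping duplicates.
father1 = [0, 1, 2, 3, 4, 5]
father2 = [5, 3, 1, 0, 2, 4]
smaLoc, bigLoc = 2, 4

child = [None] * 6
child[smaLoc:bigLoc + 1] = father1[smaLoc:bigLoc + 1]   # child = [_, _, 2, 3, 4, _]

childLoc = 0
for num in father2:
    if childLoc == smaLoc:            # jump over the donated slice
        childLoc = bigLoc + 1
    if childLoc >= len(child):
        break
    if num in father1[smaLoc:bigLoc + 1]:
        continue                      # 3, 2 and 4 are already placed
    child[childLoc] = num
    childLoc += 1

print(child)  # [5, 1, 2, 3, 4, 0] -- still a permutation of 0..5
```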
if __name__ == "__main__":
    myinput="This is the worst movie ever made. Ever. It beats everything. I have never seen worse. Retire the trophy and give it to these people.....there's just no comparison.<br /><br />Even three days after watching this (for some reason I still don't know why) I cannot believe how insanely horrific this movie is/was. Its so bad. So far from anything that could be considered a movie, a story or anything that should have ever been created and brought into our existence.<br /><br />This made me question whether or not humans are truly put on this earth to do good. It made me feel disgusted with ourselves and our progress as a species in this universe. This type of movie sincerely hurts us as a society."
    # myinput=input("input")

    oriSenList=InputToSenList(myinput,'clause')  # the model argument was missing in the original call
@ -0,0 +1,519 @@
import pandas as pd
import numpy as np
import tensorflow as tf
import keras
from sklearn.preprocessing import MinMaxScaler
import re
import copy
import math
from random import shuffle
from random import uniform
from random import randint

# Helper functions
# myinput must be a string
def InputToSenList(senten,model):
    mark=' mark! '
    # Regular expression that decides where to split
    stripSpecialChars=re.compile("[^A-Za-z0-9 ]+")
    # Lower-case everything
    senten=senten.lower().replace('<br />','')
    #print(senten)
    # Replace every punctuation mark with the marker token
    subSenList=[]

    if model=='clause':
        myinput=re.sub(stripSpecialChars,mark,senten)
        # wordVec holds the tokens, i.e. the words
        wordVec=myinput.split()

        # markLoc records the positions of 'mark!', i.e. of the punctuation,
        # which serve as the clause boundaries
        markLoc=[]
        markLoc.append(0)

        shiftNum=0
        for i in range(len(wordVec)):
            if wordVec[i-shiftNum]=='mark!':
                markLoc.append(i-shiftNum)
                wordVec.pop(i-shiftNum)
                shiftNum+=1

        # Cut at the punctuation positions and collect each clause
        for i in range(len(markLoc)-1):
            subSenList.append(" ".join(wordVec[markLoc[i]:markLoc[i+1]]))
    else:
        myinput=re.sub(stripSpecialChars,' ',senten)
        # subSenList holds the tokens, i.e. the words
        subSenList=myinput.split()

    return subSenList


# Turn a token list into the id-encoded comment matrix the model consumes
def ListToVecComment(tempSenList):
    global wordsList

    # comment holds the review with every word replaced by its dictionary index
    comment=np.zeros([batchSize,maxSeqLength])

    fullSent=' '.join(tempSenList)

    counter=0

    for word in fullSent.split():
        try:
            comment[0][counter]=wordsList.index(word)
        except Exception:
            comment[0][counter]=399999  # id reserved for unknown words
        counter+=1
        if counter==250:
            break

    return comment


def CountWordNum(strs):
    count=1
    for word in strs:
        if word==' ':
            count+=1

    return count


def GetSenList(myinput,model='clause'):

    senList=[]
    # Only one day is predicted, so only the last fifteen numbers are kept
    tempList=myinput.split()[-15:]

    if model=='word':
        senList=tempList
    else:
        # Group the numbers into "clauses" of three
        senten = ''
        count = 0
        for number in tempList:
            senten += str(number)+' '
            count += 1
            if(count>=3):
                senList.append(senten)
                senten=''
                count=0

        if senten:
            senList.append(senten)

    return senList
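Traced by hand, the COVID variant of `GetSenList` keeps the last fifteen numbers and chunks them into groups of three (a toy input for illustration; note the trailing space each group keeps):

```python
>>> GetSenList('10 20 30 40 50 60 70 80 90 100 110 120 130 140 150', 'clause')
['10 20 30 ', '40 50 60 ', '70 80 90 ', '100 110 120 ', '130 140 150 ']
```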
maxSeqLength=250

batchSize = 24
lstmUnits = 64
numClasses = 2
numDimensions = 50 #Dimensions for each word vector
iterations = 1000 #100000
learnRate=0


class GetDifferCovid():
    def __init__(self,myinput,RNNModel):
        # The input split into clauses (groups of numbers)
        self.senList=InputToSenList(myinput,'clause')
        # The input split into single numbers
        self.wordList=InputToSenList(myinput,'word')

        # Length of the resulting list
        self.sentenSize=len(self.senList)

        # The prediction model; the user's model must expose a Predict method
        # self.PreRNN=PredictRNN()
        self.PreRNN=RNNModel
        # Prediction for the original order
        self.oriRes=self.PreRNN.Predict(' '.join(self.senList))
        self.iterations=100
        # Suggested number of iterations: number of clauses * number of whole passes


    # Search for the orderings with the largest prediction deviation
    def GetDiffOrder(self):

        indexOrder1= np.arange(start=0,stop=self.sentenSize,dtype=int)

        indexOrder1=indexOrder1.reshape(1,-1)
        indexOrder1=np.repeat(indexOrder1,batchSize,axis=0)
        # batchSize x clause count
        indexOrder2=copy.deepcopy(indexOrder1)

        fitness=np.zeros(batchSize*2)

        allRes=None
        allIndexOrder=None

        iterations=15
        for it in range(iterations):  # renamed from i: the original shadowed this index below
            indexOrder1 = self.Variate(indexOrder1,it)
            indexOrder2 = self.Variate(indexOrder2,it)

            # if it!=(iterations-1):

            indexOrder1=self.OX(indexOrder1)
            indexOrder2=self.OX(indexOrder2)

            comment1=self.IndexToInput(indexOrder1)

            res1=np.arange(start=0,stop=batchSize,dtype=np.float32)
            for c in range(len(comment1)):
                res1[c]=self.PreRNN.Predict(comment1[c])

            comment2=self.IndexToInput(indexOrder2)

            res2=np.arange(start=0,stop=batchSize,dtype=np.float32)
            for c in range(len(comment2)):
                res2[c]=self.PreRNN.Predict(comment2[c])  # the original wrote into res1 here

            allRes = np.concatenate((res1,res2))
            allIndexOrder = np.concatenate((indexOrder1,indexOrder2))

            # Fitness: population 1 rewards predictions above the original result
            for j in range(batchSize*2):
                fitness[j] = (allRes[j]-self.oriRes) #(res[j][0]-oriRes[0])*30
                #fitness[j] -= InverNum(indexOrder[j])

            # Roulette-wheel selection for population 1
            totalRan=0

            for r in allRes:  # renamed from re: the original shadowed the re module
                totalRan+=(abs(r-self.oriRes))

            for j in range(batchSize):
                ranNum=uniform(0,totalRan)
                ranCount=0
                for k in range(len(allRes)):
                    ranCount+=(abs(allRes[k]-self.oriRes))
                    if(ranCount>=ranNum):
                        indexOrder1[j]=allIndexOrder[k]
                        break

            # Fitness: population 2 rewards predictions below the original result
            for j in range(batchSize*2):
                fitness[j] = (self.oriRes-allRes[j])
                #(res[j][0]-oriRes[0])*30
                #fitness[j] -= InverNum(indexOrder[j])

            # Roulette-wheel selection for population 2
            totalRan=0

            for r in allRes:
                totalRan+=(abs(r-self.oriRes))

            for j in range(batchSize):
                ranNum=uniform(0,totalRan)
                ranCount=0
                for k in range(len(allRes)):
                    ranCount+=(abs(allRes[k]-self.oriRes))
                    if(ranCount>=ranNum):
                        indexOrder2[j]=allIndexOrder[k]
                        break

            # else:
            #     indexOrder=fatherIndexOrder

        comment1=self.IndexToInput(indexOrder1)

        res1=np.arange(start=0,stop=batchSize,dtype=np.float32)
        for c in range(len(comment1)):
            res1[c]=self.PreRNN.Predict(comment1[c])

        comment2=self.IndexToInput(indexOrder2)

        res2=np.arange(start=0,stop=batchSize,dtype=np.float32)
        for c in range(len(comment2)):
            res2[c]=self.PreRNN.Predict(comment2[c])  # the original wrote into res1 here

        allRes = np.concatenate((res1,res2))
        allIndexOrder = np.concatenate((indexOrder1,indexOrder2))

        # Bubble-sort the combined population by predicted value
        for j in range(1, batchSize*2):
            for k in range(0, batchSize*2 - j ):
                if allRes[k]> allRes[k+1]:
                    allRes[k],allRes[k+1] = allRes[k+1],allRes[k]
                    allIndexOrder[[k,k+1], :] = allIndexOrder[[k+1,k], :]

                    # allRes[k], allRes[k+1] = allRes[k+1], allRes[k]
                    # allIndexOrder[k], allIndexOrder[k+1] = allIndexOrder[k+1], allIndexOrder[k]

        reorderRes=[]
        reorderInd=[]

        # print('after')
        # for index in allIndexOrder:
        #     print(index)

        # Keep only distinct results
        repeat=[]
        for i in range(len(allRes)):
            if(allRes[i] not in repeat):
                repeat.append(allRes[i])
                reorderRes.append(allRes[i].tolist())
                reorderInd.append(allIndexOrder[i].tolist())

        orderLine1=self.GetGlobalOrderLine(indexOrder1.tolist())
        orderLine2=self.GetGlobalOrderLine(indexOrder2.tolist())
        orderLine=orderLine1+orderLine2

        print(orderLine)

        return reorderRes,reorderInd,orderLine1,orderLine2


    # Mutation operator
    def Variate(self,indexOrder,iterations):

        theOrder=indexOrder
        for i in range(batchSize):
            if iterations==0:
                randChoice=randint(1,20)
            else:
                randChoice=randint(1,5)

            if randChoice>=5:
                # newOrder = np.zeros(len(theOrder[i]))
                smaLoc = randint(0,self.sentenSize-1)
                bigLoc = randint(0,self.sentenSize-1)

                if smaLoc>bigLoc:
                    smaLoc,bigLoc = bigLoc,smaLoc

                # Rotate the slice [smaLoc, bigLoc] one position to the left
                temp=theOrder[i][smaLoc]
                theOrder[i][smaLoc:bigLoc]=theOrder[i][smaLoc+1:bigLoc+1]
                theOrder[i][bigLoc]=temp

        return theOrder


    # Order crossover (OX) between pairs of parents
    def OX(self,fatherIndexOrder):
        childIndexOrder=np.zeros([batchSize,self.sentenSize])

        for i in range(int(batchSize/2)):
            # father1Loc=i*2
            # father2Loc=i*2+1
            father1Loc=i
            father2Loc=int(i+batchSize/2)
            randChoice=randint(1,20)

            if randChoice==1:
                # Occasionally copy the parents through unchanged
                childIndexOrder[father1Loc] = fatherIndexOrder[father1Loc]
                childIndexOrder[father2Loc] = fatherIndexOrder[father2Loc]
            else:
                father1 = fatherIndexOrder[father1Loc]
                father2 = fatherIndexOrder[father2Loc]

                smaLoc = randint(0,self.sentenSize-1)
                bigLoc = randint(0,self.sentenSize-1)

                if smaLoc>bigLoc:
                    smaLoc,bigLoc = bigLoc,smaLoc

                # Copy a slice of parent 1, then fill the rest in parent 2's order
                childIndexOrder[father1Loc][smaLoc:bigLoc+1]=father1[smaLoc:bigLoc+1]

                childLoc=0

                for num in father2:
                    if childLoc == smaLoc:
                        childLoc = bigLoc+1
                    if childLoc >= self.sentenSize:
                        break

                    if num in father1[smaLoc:bigLoc+1]:
                        continue

                    childIndexOrder[father1Loc][childLoc]=num

                    childLoc+=1

                # Same crossover with the parent roles swapped
                father1 = fatherIndexOrder[father2Loc]
                father2 = fatherIndexOrder[father1Loc]

                smaLoc = randint(0,self.sentenSize-1)
                bigLoc = randint(0,self.sentenSize-1)

                if smaLoc>bigLoc:
                    smaLoc,bigLoc = bigLoc,smaLoc

                childIndexOrder[father2Loc][smaLoc:bigLoc+1]=father1[smaLoc:bigLoc+1]

                childLoc=0

                for num in father2:
                    if childLoc == smaLoc:
                        childLoc = bigLoc+1
                    if childLoc >= self.sentenSize:
                        break

                    if num in father1[smaLoc:bigLoc+1]:
                        continue

                    childIndexOrder[father2Loc][childLoc]=num
                    childLoc+=1

        return childIndexOrder


    # Turn index orderings back into text inputs
    def IndexToInput(self,indexOrder):
        # comment collects the reordered inputs as plain text
        comment=[]

        for i in range(batchSize):
            allsub=[]
            for index in indexOrder[i]:
                allsub.append(self.senList[int(index)])
            # TOFix!!!!!!!
            fullSent=' '.join(allsub)

            comment.append(fullSent)

        return comment


    # Collect clause subsequences that repeat across the evolved orderings
    def GetGlobalOrderLine(self,indexOrder):
        orderLine=[]
        # print('indx',indexOrder)
        lenth=len(indexOrder[0])

        threHold=9

        for sen in range(batchSize-threHold):  # scan the candidate orderings
            for i in range(lenth):  # for a chosen ordering, scan every subsequence
                for j in range(i+2,lenth+1):
                    count=0
                    target=indexOrder[sen][i:j]
                    # print('tar',target)
                    for k in range(1,batchSize):
                        searchOrder=indexOrder[k]
                        for l in range(lenth+i-j):
                            # if indexOrder[i:j] == searchOrder[l:l-i+j]:
                            if (target == searchOrder[l:l-i+j]):
                                # print('searchOrd',searchOrder[l:l-i+j])
                                count+=1
                                break

                    # Keep the subsequence if it repeats often enough
                    # (shorter subsequences need more repetitions)
                    if count>3 and count>(15-len(target)*3):
                        isRepeat=False
                        for line in orderLine:
                            d = [False for c in target if c not in line]
                            if not d:
                                isRepeat=True
                                break

                            d = [False for c in line if c not in target]
                            if not d:
                                orderLine.remove(line)

                        if not isRepeat:
                            orderLine.append(target)

                    else:
                        break

        return orderLine


if __name__ == "__main__":
    myinput="44210 50393 43088 31169 23567 27393 34057 36073 47778 41062 34351 34428 39507 39018 45137 49284 42159 38415 51972 "
    # myinput=input("input")

    # NOTE: the constructor takes (input, model); the original call passed two
    # stray string arguments. Supply an object exposing Predict() as RNNModel.
    MyGetDiffer=GetDifferCovid(myinput,RNNModel)

    reorderRes,reorderInd,orderLine1,orderLine2=MyGetDiffer.GetDiffOrder()
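The selection step in `GetDiffOrder` is fitness-proportionate (roulette-wheel) sampling: each candidate is drawn with probability proportional to its absolute deviation from the original prediction. A minimal standalone sketch of the same idea, on hypothetical toy values:

```python
from random import uniform

candidates = ['A', 'B', 'C']
deviation  = [0.1, 0.6, 0.3]   # |prediction - original|, the fitness used above

def roulette_pick():
    ran_num, ran_count = uniform(0, sum(deviation)), 0.0
    for cand, fit in zip(candidates, deviation):
        ran_count += fit
        if ran_count >= ran_num:
            return cand        # 'B' is returned ~60% of the time
    return candidates[-1]      # guard against floating-point round-off

print([roulette_pick() for _ in range(10)])
```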
@ -0,0 +1,460 @@
from random import shuffle
import numpy as np
import copy
# from Predict import GetSenList,PredictMulti,Predict24
import os

import re

# negNP=np.load('./EmotionInfo/XplainRandomByOneNeg.npy')
# negDiffer=np.load('./EmotionInfo/DifferRandomByOneNeg.npy')

# posNP=np.load('./EmotionInfo/XplainRandomByOnePos.npy')
# posDiffer=np.load('./EmotionInfo/DifferRandomByOnePos.npy')


def CastToTrainData(emotion,theDifferCount):
    # NOTE: relies on negNP/negDiffer/posNP/posDiffer; uncomment the np.load
    # lines above before calling this.
    oriSenListZip=[]
    differCountZip=[]
    colorCountZip=[]

    path=None
    myNP=None
    myDiffer=None
    if(emotion=='neg'):
        path="./VectorList/negativeReviews/"
        myNP=negNP
        myDiffer=negDiffer
    else:
        path="./VectorList/positiveReviews/"
        myNP=posNP
        myDiffer=posDiffer

    maxDiffer=0
    CriticakDifferCount=[]

    for num in theDifferCount:
        if(abs(num)>maxDiffer):
            maxDiffer=abs(num)

    for num in theDifferCount:
        if(abs(num)>(maxDiffer*0.5)):
            CriticakDifferCount.append(num)

    counter=0
    for i in range(len(myNP)):
        trainDataNp=myNP[i]
        # Pull one example from the training set
        if i%1000==0:
            print('i:',i)

        # Accumulate the deviation part by part
        gap=0

        compTimes=10
        if len(CriticakDifferCount)<10:
            compTimes=len(CriticakDifferCount)

        if trainDataNp[compTimes-1]==0:
            continue

        for j in (range(compTimes)):
            # Early exit: adding 1 guarantees gap fails the threshold below
            if trainDataNp[j]==0:
                gap+=1
            else:
                gap+=abs(CriticakDifferCount[j]-trainDataNp[j])

        #print(gap)

        # If the gap is small enough, open the matching review for inspection
        if(gap<(0.001*len(trainDataNp))):
            print(i)

            name=''
            if(emotion=='neg'):
                for l in range(5):
                    name=path+str(i)+'_'+str(l)+'.txt'
                    if(os.path.exists(name)):
                        break
            else:
                for l in range(7,11):
                    name=path+str(i+1000)+'_'+str(l)+'.txt'
                    if(os.path.exists(name)):
                        break

            print(name)

            with open(name ,'r',encoding='utf-8') as f:
                content=''
                for line in f.readlines():
                    content+=line

            content+='. '
            inputList=InputToSenList(content)
            if(len(inputList)>=50):
                continue
            oriSenListZip.append(inputList)

            theDiffer=myDiffer[i].tolist()
            theDiffer=theDiffer[:len(inputList)]
            print('last:',theDiffer[len(inputList)-1])

            differCountZip.append(theDiffer)
            colorCountZip.append(DifferToColor(theDiffer))

            counter+=1
            if(counter==5):
                print('end:',i)
                break

    return oriSenListZip,differCountZip,colorCountZip


def ReadTheFile(loc):
    dirpath='./data/names'

    counter=0
    for files in os.listdir(dirpath):
        if not files.endswith('.txt'):
            continue
        # print(files)
        for line in open(dirpath+'/'+files,encoding='utf8'):
            counter+=1

            if counter==loc:
                # Space out the characters and insert ';' every two of them
                theData=''
                counter=0
                line = line.strip('\n')
                for word in line:
                    theData = theData+word+' '
                    counter+=1
                    if counter==2:
                        theData=theData+';'
                        counter=0

                if not theData.endswith(';'):
                    theData=theData+';'

                return files,theData


def CastToTrainWithOrder(orderLine1,orderLine2,emotion,theDifferCount):

    # oriSenListZip=[]
    # differCountZip=[]
    trainDataZip=[]
    allDataFile=[]

    myDiffer=np.load('./TrainDataDiffer.npy')

    for line in orderLine1:
        oriSenListsmallZip=[]
        allOriSenZip=[]

        differCountsmallZip=[]
        counter=0
        for i in range(len(myDiffer)):
            trainDataNp=myDiffer[i]
            # Pull one example from the training set
            if i%1000==0:
                print('i:',i)

            # Slide the orderLine over the example and accumulate the deviation
            lineLenth=len(line)
            # print(line)
            # print('linelenth',lineLenth)

            for j in (range(10-lineLenth)):
                if trainDataNp[j] == 0:
                    break
                gap=0
                for k in range(lineLenth):
                    # print(trainDataNp )
                    # print('k',k)
                    # print( theDifferCount[int(line[k])])
                    gap+=abs(trainDataNp[j+k] - theDifferCount[int(line[k])])

                #print(gap)

                # If the gap is small enough, open the matching file for inspection
                if(gap<(0.001*len(trainDataNp))):

                    subName,content=ReadTheFile(i)

                    inputList=InputToSenList(content)

                    if(counter<5):
                        oriSenListsmallZip.append({'name':subName,'content':inputList[j:j+lineLenth]})

                    allOriSenZip.append({'name':subName,'content':inputList[j:j+lineLenth]})

                    theDiffer=trainDataNp.tolist()
                    theDiffer=theDiffer[j:j+lineLenth]
                    print('last:',theDiffer)

                    differCountsmallZip.append(theDiffer)
                    #colorCountZip.append(DifferToColor(theDiffer))

                    counter+=1
                    break

            # print(counter)

            if(counter>=20):
                print('end:',i)
                break

        trainDataZip.append({'emotion':'Class1','orderLine':line,'oriSenListsmallZip':oriSenListsmallZip,'differCountsmallZip':differCountsmallZip})

        allDataFile.append({'Class':'Class1','orderLine':line,'oriSenListsmallZip':allOriSenZip})

        # oriSenListZip.append(oriSenListsmallZip)
        # differCountZip.append(differCountsmallZip)

    return trainDataZip,allDataFile
    # return oriSenListZip,differCountZip #,colorCountZip


def GetOrderLineDetail(orderLine1,orderLine2,oriSenList,differCount,localDifferCount,RNNModel):
    orderLineInfoZip=[]

    meanImpZip = []
    # sentenZip = []
    allImpZip=[]

    allLocImpZip=[]
    meanLocImpZip=[]

    orderLines = orderLine1+orderLine2

    res=[]

    for line in orderLines:
        count = 0
        impor = 0
        locImpor = 0
        ablationInd = []
        allImp=[]
        allLocImp=[]
        for ind in line:
            count += 1
            impor += differCount[int(ind)]
            locImpor += localDifferCount[int(ind)]
            allImp.append(differCount[int(ind)])
            allLocImp.append((-1)*localDifferCount[int(ind)])
            ablationInd.append(int(ind))
        allImpZip.append(allImp)
        allLocImpZip.append(allLocImp)
        meanImpZip.append(format(impor/count, '.4f'))
        meanLocImpZip.append(format((-1)*locImpor/count, '.4f'))
        # Ablate the clauses of this orderLine and predict on the rest
        senten=[]
        for i in range(len(oriSenList)):
            if i not in ablationInd:
                senten.append(oriSenList[i])
        # sentenZip.append(senten)

        res.append(RNNModel.Predict(' '.join(senten)))

    tableReorderValue=GetTableReorderValue(orderLines,oriSenList,RNNModel)
    print('tableReorderValue',tableReorderValue)

    for i in range(len(orderLines)):
        orderLineInfoZip.append({'id':i,'order':orderLines[i],'allImp':allImpZip[i],'allLocImp':allLocImpZip[i],'importance':meanImpZip[i],'locImportance':meanLocImpZip[i],'value':res[i],'resValue':tableReorderValue[i]})

    print('orderLineInfoZip',orderLineInfoZip)

    return orderLineInfoZip


def GetTableReorderValue(orderLines,oriSenList,RNNModel):

    oriRes=RNNModel.Predict(' '.join(oriSenList))
    # Record the deviation from the original result after each reshuffle
    differCount=np.zeros(len(orderLines),dtype=float)

    senNum=len(oriSenList)

    batchSize=24

    subNumCount1=0  # which orderLine the current batch entry belongs to
    subNumCount2=0
    iterations=10

    orderNums=len(orderLines)
    if orderNums==0:
        return None
    print('orderLines',orderLines)
    print('orderNums',orderNums)

    calTimes=1
    for i in range(iterations+15):

        theWordCom=[]
        for j in range(batchSize):
            # Shuffle the clauses of one orderLine inside the original input
            theOrder=orderLines[subNumCount1]
            shufOrder=copy.deepcopy(theOrder)
            shuffle(shufOrder)
            # print('theOrder',theOrder)
            # print('shufOrder',shufOrder)
            theInd=[]
            wordCom=""

            for k in range(senNum):
                theInd.append(k)

            # print('theInd',theInd)
            for k in range(len(theOrder)):
                # print('theOrder',theOrder[k])
                # print('shufOrder',shufOrder[k])
                theInd[int(theOrder[k])]=shufOrder[k]

            # print('theInd',theInd)
            for ind in theInd:
                wordCom = wordCom+' '+oriSenList[int(ind)]

            theWordCom.append(wordCom)

            subNumCount1+=1
            if(subNumCount1>=orderNums):
                subNumCount1=0
                calTimes+=1
                if(i>iterations):
                    break

        #res=sess.run(PreRNN.predicr,{PreRNN.inputData: comment})
        res=[]
        for com in theWordCom:
            res.append(RNNModel.Predict(com))
        #print(res)

        # Accumulate the deviations
        for j in range(batchSize):
            calDif=0

            for t in range(len(res[j])):  # renamed from i: the original shadowed the outer loop index
                calDif+=abs(res[j][t]-oriRes[t])

            calDif/=len(res)

            differCount[subNumCount2]+=calDif
            subNumCount2+=1
            if(subNumCount2>=orderNums):
                subNumCount2=0
                calTimes+=1
                if(i>iterations):
                    break

        if(i>iterations and subNumCount2==0):
            break

    differCount=differCount/calTimes
    # print('differCount',differCount)
    # reorderRes=[]
    # for preNum1 in differCount:
    #     reorderRes.append([preNum1,(1-preNum1)])
    # Return the deviations
    return differCount


def DifferToColor(differCount):
    # Map signed importance values onto hex colors: positive -> blue channel,
    # negative -> red channel, zero -> neutral grey
    maxProb=np.max(differCount)
    minProb=np.min(differCount)

    colorCount=[]

    for i in range(len(differCount)):

        if(differCount[i]>0):
            decNum=int(differCount[i]*127/maxProb)+127
            color='#8888'+hex(decNum)[2:].zfill(2)
            colorCount.append(color)
        elif (differCount[i]<0):
            decNum=int(differCount[i]*127/minProb)+127
            color='#'+hex(decNum)[2:].zfill(2)+'8888'
            colorCount.append(color)
        else:
            colorCount.append('#888888')

    return colorCount
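Traced by hand, the color mapping above scales each value against the extreme of its sign, so the strongest positive value saturates the blue byte and the strongest negative one the red byte (hypothetical inputs):

```python
>>> DifferToColor([0.5, -0.2, 0.0])
['#8888fe', '#fe8888', '#888888']
```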
def InputToSenList(senten,mark=' mark! '):
    # Same clause splitter as elsewhere in the repo, but empty clauses are dropped
    stripSpecialChars=re.compile("[^A-Za-z0-9 ]+")
    senten=senten.lower().replace('<br />','')
    #print(senten)
    myinput=re.sub(stripSpecialChars,mark,senten)
    wordVec=myinput.split()

    markLoc=[]
    markLoc.append(0)
    subSenList=[]
    shiftNum=0
    for i in range(len(wordVec)):
        if wordVec[i-shiftNum]=='mark!':
            markLoc.append(i-shiftNum)
            wordVec.pop(i-shiftNum)
            shiftNum+=1

    for i in range(len(markLoc)-1):
        wodList=" ".join(wordVec[markLoc[i]:markLoc[i+1]])
        if wodList:
            subSenList.append(wodList)

    return subSenList


if __name__ == "__main__":
    myinput="This is the worst movie ever made. Ever. It beats everything. I have never seen worse. Retire the trophy and give it to these people.....there's just no comparison.<br /><br />Even three days after watching this (for some reason I still don't know why) I cannot believe how insanely horrific this movie is/was. Its so bad. So far from anything that could be considered a movie, a story or anything that should have ever been created and brought into our existence.<br /><br />This made me question whether or not humans are truly put on this earth to do good. It made me feel disgusted with ourselves and our progress as a species in this universe. This type of movie sincerely hurts us as a society."
    # myinput=input("input")

    # NOTE: CastToTrainData expects a list of importance values as its second
    # argument; passing the raw review string here cannot work as written.
    CastToTrainData('neg',myinput)
@ -0,0 +1,527 @@
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import copy
from random import shuffle

# from Predict import GetSenList,PredictMulti
import os

import re


class TrainData():
    def __init__(self):

        data = pd.read_csv("./CovidInfo/Aamerica.csv")

        date = data["date"].values
        cases = data["comfirmed"].values

        temp1 = []

        # Replace dates with indices and turn cumulative confirmed counts
        # into daily new cases
        for i in range(len(date)):
            date[i] = i
            if(i == 0):
                temp1.append(cases[0])
            if(i>0):
                temp1.append(cases[i] - cases[i-1])

        cases = temp1[1:]

        # Carve out the training set
        date_train = date[0:len(cases)-25]
        cases_train = cases[0:len(cases)-25]

        # Pack the training data
        cases_train = list(zip(date_train, cases_train))

        train1 = pd.DataFrame(cases_train, columns=['date', 'comfirmed'])

        train1['date'] = train1['date'].astype(float)
        train1['comfirmed'] = train1['comfirmed'].astype(float)

        x_train1 = train1.iloc[:, 1:2].values
        self.scaler1 = MinMaxScaler(feature_range = (0,1))
        self.scaler1.fit_transform(x_train1)

        self.trainData=x_train1
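The loop in `__init__` converts the cumulative `comfirmed` column into daily new cases before scaling. Traced on a toy series (hypothetical numbers in the CSV's format):

```python
>>> cumulative = [100, 130, 170, 240]
>>> [cumulative[i] - cumulative[i-1] for i in range(1, len(cumulative))]
[30, 40, 70]   # daily new cases; the first cumulative entry is dropped, as in __init__
```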
def CastToTrainData(emotion,theDifferCount):
|
||||
oriSenListZip=[]
|
||||
differCountZip=[]
|
||||
colorCountZip=[]
|
||||
|
||||
|
||||
path=None
|
||||
myNP=None
|
||||
myDiffer=None
|
||||
if(emotion=='neg'):
|
||||
path="./VectorList/negativeReviews/"
|
||||
myNP=negNP
|
||||
myDiffer=negDiffer
|
||||
else:
|
||||
path="./VectorList/positiveReviews/"
|
||||
myNP=posNP
|
||||
myDiffer=posDiffer
|
||||
|
||||
|
||||
|
||||
maxDiffer=0
|
||||
CriticakDifferCount=[]
|
||||
|
||||
for num in theDifferCount:
|
||||
if(abs(num)>maxDiffer):
|
||||
maxDiffer=abs(num)
|
||||
|
||||
for num in theDifferCount:
|
||||
if(abs(num)>(maxDiffer*0.5)):
|
||||
CriticakDifferCount.append(num)
|
||||
|
||||
counter=0
|
||||
for i in range(len(myNP)):
|
||||
trainDataNp=myNP[i]
|
||||
#从训练集中抽取一个数据
|
||||
if i%1000==0:
|
||||
print('i:',i)
|
||||
|
||||
#逐个部分计算差值
|
||||
gap=0
|
||||
|
||||
compTimes=10
|
||||
if len(CriticakDifferCount)<10:
|
||||
compTimes=len(CriticakDifferCount)
|
||||
|
||||
|
||||
if trainDataNp[compTimes-1]==0:
|
||||
continue
|
||||
|
||||
for j in (range(compTimes)):
|
||||
#这边直接退出,加了1,gap一定不满足小于0.03
|
||||
if trainDataNp[j]==0:
|
||||
gap+=1
|
||||
else:
|
||||
gap+=abs(CriticakDifferCount[j]-trainDataNp[j])
|
||||
|
||||
#print(gap)
|
||||
|
||||
#如果差距比较小,则打开观察
|
||||
if(gap<0.002):
|
||||
print(i)
|
||||
|
||||
|
||||
|
||||
|
||||
name=''
|
||||
if(emotion=='neg'):
|
||||
for l in range(5):
|
||||
name=path+str(i)+'_'+str(l)+'.txt'
|
||||
if(os.path.exists(name)):
|
||||
break
|
||||
|
||||
else:
|
||||
for l in range(7,11):
|
||||
name=path+str(i+1000)+'_'+str(l)+'.txt'
|
||||
if(os.path.exists(name)):
|
||||
break
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
print(name)
|
||||
|
||||
with open(name ,'r',encoding='utf-8') as f:
|
||||
content=''
|
||||
for line in f.readlines():
|
||||
content+=line
|
||||
|
||||
content+='. '
|
||||
inputList=InputToSenList(content)
|
||||
if(len(inputList)>=50):
|
||||
continue
|
||||
oriSenListZip.append(inputList)
|
||||
|
||||
|
||||
|
||||
theDiffer=myDiffer[i].tolist()
|
||||
theDiffer=theDiffer[:len(inputList)]
|
||||
print('last:',theDiffer[len(inputList)-1])
|
||||
|
||||
differCountZip.append(theDiffer)
|
||||
colorCountZip.append(DifferToColor(theDiffer))
|
||||
|
||||
|
||||
counter+=1
|
||||
if(counter==5):
|
||||
print('end:',i)
|
||||
break
|
||||
|
||||
return oriSenListZip,differCountZip,colorCountZip
|
||||
|
||||
|
||||
def CastToTrainWithOrderCovid(orderLine1,orderLine2,emotion,theDifferCount):
|
||||
# oriSenListZip=[]
|
||||
# differCountZip=[]
|
||||
MyTrainData=TrainData()
|
||||
|
||||
trainDataZip=[]
|
||||
|
||||
|
||||
myDiffer=np.load('./CovidInfo/TrainDataDifferCovid.npy')
|
||||
|
||||
|
||||
|
||||
for line in orderLine1:
|
||||
oriSenListsmallZip=[]
|
||||
differCountsmallZip=[]
|
||||
counter=0
|
||||
for i in range(len(myDiffer)):
|
||||
trainDataNp=myDiffer[i]
|
||||
#从训练集中抽取一个数据
|
||||
|
||||
#逐个部分计算差值
|
||||
|
||||
lineLenth=len(line)
|
||||
# print(line)
|
||||
# print('linelenth',lineLenth)
|
||||
|
||||
for j in (range(10-lineLenth)):
|
||||
if trainDataNp[j] == 0:
|
||||
break
|
||||
gap=0
|
||||
for k in range(lineLenth):
|
||||
# print(trainDataNp )
|
||||
# print('k',k)
|
||||
# print( theDifferCount[int(line[k])])
|
||||
gap+=abs(trainDataNp[j+k] - theDifferCount[int(line[k])])
|
||||
|
||||
|
||||
#如果差距比较小,则打开观察
|
||||
if(gap<3000):
|
||||
|
||||
|
||||
|
||||
content=MyTrainData.trainData[i:i+15]
|
||||
inputList=content.tolist()
|
||||
|
||||
subName=''
|
||||
for num in range(i+j,i+j+lineLenth+1):
|
||||
subName=subName+' '+str(num)
|
||||
|
||||
oriSenListsmallZip.append({'name':subName,'content':inputList[j:j+lineLenth]})
|
||||
|
||||
|
||||
|
||||
theDiffer=trainDataNp.tolist()
|
||||
theDiffer=theDiffer[j:j+lineLenth]
|
||||
print('last:',theDiffer)
|
||||
|
||||
differCountsmallZip.append(theDiffer)
|
||||
#colorCountZip.append(DifferToColor(theDiffer))
|
||||
|
||||
|
||||
counter+=1
|
||||
break
|
||||
|
||||
# print(counter)
|
||||
|
||||
if(counter>=5):
|
||||
print('end:',i)
|
||||
break
|
||||
|
||||
trainDataZip.append({'emotion':'pos','orderLine':line,'oriSenListsmallZip':oriSenListsmallZip,'differCountsmallZip':differCountsmallZip})
|
||||
|
||||
# oriSenListZip.append(oriSenListsmallZip)
|
||||
# differCountZip.append(differCountsmallZip)
|
||||
|
||||
|
||||
for line in orderLine2:
|
||||
oriSenListsmallZip=[]
|
||||
differCountsmallZip=[]
|
||||
counter=0
|
||||
for i in range(len(myDiffer)):
|
||||
trainDataNp=myDiffer[i]
|
||||
#从训练集中抽取一个数据
|
||||
|
||||
#逐个部分计算差值
|
||||
|
||||
lineLenth=len(line)
|
||||
# print(line)
|
||||
# print('linelenth',lineLenth)
|
||||
|
||||
for j in (range(10-lineLenth)):
|
||||
if trainDataNp[j] == 0:
|
||||
break
|
||||
gap=0
|
||||
for k in range(lineLenth):
|
||||
# print(trainDataNp )
|
||||
# print('k',k)
|
||||
# print( theDifferCount[int(line[k])])
|
||||
gap+=abs(trainDataNp[j+k] - theDifferCount[int(line[k])])
|
||||
|
||||
#print(gap)
|
||||
|
||||
#如果差距比较小,则打开观察
|
||||
if(gap<3000):
|
||||
|
||||
|
||||
|
||||
content=MyTrainData.trainData[i:i+15]
|
||||
inputList=content.tolist()
|
||||
|
||||
subName=''
|
||||
for num in range(i+j,i+j+lineLenth+1):
|
||||
subName=subName+' '+str(num)
|
||||
|
||||
oriSenListsmallZip.append({'name':subName,'content':inputList[j:j+lineLenth]})
|
||||
|
||||
|
||||
|
||||
theDiffer=trainDataNp.tolist()
|
||||
theDiffer=theDiffer[j:j+lineLenth]
|
||||
print('last:',theDiffer)
|
||||
|
||||
differCountsmallZip.append(theDiffer)
|
||||
#colorCountZip.append(DifferToColor(theDiffer))
|
||||
|
||||
|
||||
counter+=1
|
||||
break
|
||||
|
||||
# print(counter)
|
||||
|
||||
if(counter>=5):
|
||||
print('end:',i)
|
||||
break
|
||||
|
||||
trainDataZip.append({'emotion':'neg','orderLine':line,'oriSenListsmallZip':oriSenListsmallZip,'differCountsmallZip':differCountsmallZip})
|
||||
# oriSenListZip.append({oriSenListsmallZip})
|
||||
|
||||
# differCountZip.append(differCountsmallZip)
|
||||
|
||||
return trainDataZip
|
||||
# return oriSenListZip,differCountZip #,colorCountZip
|
||||
|
||||
|
||||
|
||||
def GetOrderLineDetailCovid(orderLine1, orderLine2, oriSenList, differCount, localDifferCount, RNNModel):
    orderLineInfoZip = []
    meanImpZip = []
    allImpZip = []
    allLocImpZip = []
    meanLocImpZip = []
    res = []

    orderLines = orderLine1 + orderLine2
    for line in orderLines:
        count = 0
        impor = 0
        locImpor = 0
        ablationInd = []
        allImp = []
        allLocImp = []
        for ind in line:
            count += 1
            impor += differCount[int(ind)]
            locImpor += localDifferCount[int(ind)]
            allImp.append(differCount[int(ind)])
            allLocImp.append((-1) * localDifferCount[int(ind)])
            ablationInd.append(int(ind))
        allImpZip.append(allImp)
        allLocImpZip.append(allLocImp)
        meanImpZip.append(format(impor / count, '.4f'))
        meanLocImpZip.append(format((-1) * locImpor / count, '.4f'))

        # ablate the clauses on the order line, then pad back to 15 tokens
        senten = []
        for i in range(len(oriSenList)):
            if i not in ablationInd:
                senten.extend(oriSenList[i].split())
        for k in range(15 - len(senten)):
            senten.append('0')

        res.append(RNNModel.Predict(' '.join(senten)))

    tableReorderValue = GetTableReorderValue(orderLines, oriSenList, RNNModel)
    print('tableReorderValue', tableReorderValue)

    for i in range(len(orderLines)):
        orderLineInfoZip.append({'id': i, 'order': orderLines[i], 'allImp': allImpZip[i],
                                 'allLocImp': allLocImpZip[i], 'importance': meanImpZip[i],
                                 'locImportance': meanLocImpZip[i], 'value': res[i],
                                 'resValue': tableReorderValue[i]})

    return orderLineInfoZip

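# A minimal sketch (hypothetical helper, not part of the module) of the
# ablation-and-pad step used above: clauses listed on the order line are
# dropped and the sequence is padded back to a fixed length with '0'.
def _ablate_and_pad(oriSenList, ablationInd, length=15):
    senten = []
    for i in range(len(oriSenList)):
        if i not in ablationInd:
            senten.extend(oriSenList[i].split())
    while len(senten) < length:
        senten.append('0')
    return senten

# e.g. _ablate_and_pad(['1 2 3', '4 5 6'], [0]) -> ['4', '5', '6', '0', ...]
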
import copy
from random import shuffle


def GetTableReorderValue(orderLines, oriSenList, RNNModel):
    # record the difference between each reordered result and the original
    differCount = np.zeros(len(orderLines), dtype=float)
    senNum = len(oriSenList)
    batchSize = 24
    subNumCount1 = 0  # which order line is being processed
    subNumCount2 = 0
    iterations = 10
    orderNums = len(orderLines)
    print('orderLines', orderLines)
    print('orderNums', orderNums)

    calTimes = 1
    for i in range(iterations + 15):
        theWordCom = []
        for j in range(batchSize):
            theOrder = orderLines[subNumCount1]
            shufOrder = copy.deepcopy(theOrder)
            shuffle(shufOrder)
            theInd = []
            wordCom = []
            for k in range(senNum):
                theInd.append(k)
            for k in range(len(theOrder)):
                theInd[int(theOrder[k])] = shufOrder[k]
            for ind in theInd:
                wordCom.extend(oriSenList[int(ind)].split())
            # pad with '0' (the original padding loop shadowed the outer
            # loop variable i, breaking the stop check below)
            for k in range(15 - len(wordCom)):
                wordCom.append('0')
            theWordCom.append(' '.join(wordCom))

            subNumCount1 += 1
            if subNumCount1 >= orderNums:
                subNumCount1 = 0
                calTimes += 1
                if i > iterations:
                    break

        res = []
        for com in theWordCom:
            res.append(RNNModel.Predict(com))

        # accumulate the differences
        for j in range(batchSize):
            differCount[subNumCount2] += res[j]
            subNumCount2 += 1
            if subNumCount2 >= orderNums:
                subNumCount2 = 0
                calTimes += 1
                if i > iterations:
                    break

        if i > iterations and subNumCount2 == 0:
            break

    differCount = differCount / calTimes
    # return the averaged differences
    return differCount

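# A minimal sketch of driving GetTableReorderValue, assuming a model object
# that exposes Predict(str) -> float (ConstantModel is hypothetical):
#
# class ConstantModel:
#     def Predict(self, text):
#         return 0.0
#
# values = GetTableReorderValue([[0, 1], [1, 2]],
#                               ['1 2 3', '4 5 6', '7 8 9'],
#                               ConstantModel())
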
def DifferToColor(differCount):
    maxProb = np.max(differCount)
    minProb = np.min(differCount)

    colorCount = []
    for i in range(len(differCount)):
        if differCount[i] > 0:
            decNum = int(differCount[i] * 127 / maxProb) + 127
            color = '#8888' + hex(decNum)[2:].zfill(2)
            colorCount.append(color)
        elif differCount[i] < 0:
            decNum = int(differCount[i] * 127 / minProb) + 127
            color = '#' + hex(decNum)[2:].zfill(2) + '8888'
            colorCount.append(color)
        else:
            colorCount.append('#888888')

    return colorCount

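# For example, DifferToColor([2.0, -1.0, 0.0]) returns
# ['#8888fe', '#fe8888', '#888888']: the strongest positive difference gets
# the full blue-channel offset, the strongest negative one the full red.
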
def InputToSenList(senten, mark=' mark! '):
    stripSpecialChars = re.compile("[^A-Za-z0-9 ]+")
    senten = senten.lower().replace('<br />', '')
    # replace every punctuation run with the mark token
    myinput = re.sub(stripSpecialChars, mark, senten)
    wordVec = myinput.split()

    # markLoc records the mark positions, i.e. the clause boundaries
    markLoc = []
    markLoc.append(0)
    subSenList = []
    shiftNum = 0
    for i in range(len(wordVec)):
        if wordVec[i - shiftNum] == 'mark!':
            markLoc.append(i - shiftNum)
            wordVec.pop(i - shiftNum)
            shiftNum += 1

    for i in range(len(markLoc) - 1):
        subSenList.append(" ".join(wordVec[markLoc[i]:markLoc[i + 1]]))

    return subSenList

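# For example, InputToSenList("Great start, weak ending.") returns
# ['great start', 'weak ending']: punctuation becomes the mark token, and
# the text is split at the recorded mark positions.
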
if __name__ == "__main__":
    myinput = "This is the worst movie ever made. Ever. It beats everything. I have never seen worse. Retire the trophy and give it to these people.....there's just no comparison.<br /><br />Even three days after watching this (for some reason I still don't know why) I cannot believe how insanely horrific this movie is/was. Its so bad. So far from anything that could be considered a movie, a story or anything that should have ever been created and brought into our existence.<br /><br />This made me question whether or not humans are truly put on this earth to do good. It made me feel disgusted with ourselves and our progress as a species in this universe. This type of movie sincerely hurts us as a society."
    # myinput = input("input: ")

    CastToTrainData('neg', myinput)

@ -0,0 +1,425 @@
import pandas as pd
import numpy as np
import tensorflow as tf
import keras
from sklearn.preprocessing import MinMaxScaler
import re
import copy
from random import shuffle

maxSeqLength = 250
batchSize = 24
lstmUnits = 64
numClasses = 2
numDimensions = 50  # dimensions of each word vector

# helper functions
from random import randint

# split a whole review into clauses and return them as a list
def InputToSenList(senten, mark=' mark! '):
    stripSpecialChars = re.compile("[^A-Za-z0-9 ]+")
    senten = senten.lower().replace('<br />', '')
    myinput = re.sub(stripSpecialChars, mark, senten)
    wordVec = myinput.split()

    markLoc = []
    markLoc.append(0)
    subSenList = []
    shiftNum = 0
    for i in range(len(wordVec)):
        if wordVec[i - shiftNum] == 'mark!':
            markLoc.append(i - shiftNum)
            wordVec.pop(i - shiftNum)
            shiftNum += 1

    for i in range(len(markLoc) - 1):
        subSenList.append(" ".join(wordVec[markLoc[i]:markLoc[i + 1]]))

    return subSenList


# convert a clause list into a comment matrix of word-embedding indices
def ListToVecComment(tempSenList):
    wordsList = np.load('./VectorList/wordsList.npy')
    wordsList = wordsList.tolist()  # originally loaded as a numpy array
    wordsList = [word.decode('UTF-8') for word in wordsList]  # decode the words as UTF-8

    # `comment` stores the review with each word replaced by its dictionary index
    comment = np.zeros([batchSize, maxSeqLength])

    fullSent = ' '.join(tempSenList)
    counter = 0
    for word in fullSent.split():
        try:
            comment[0][counter] = wordsList.index(word)
        except Exception:
            comment[0][counter] = 399999  # index reserved for unknown words
        counter += 1
        if counter == 250:
            break

    return comment

def ListToVecCommentMulti(tempSenList):
    wordsList = np.load('./VectorList/wordsList.npy')
    wordsList = wordsList.tolist()  # originally loaded as a numpy array
    wordsList = [word.decode('UTF-8') for word in wordsList]  # decode the words as UTF-8

    # `comment` stores the reviews with each word replaced by its dictionary index
    comment = np.zeros([batchSize, maxSeqLength])

    for i in range(len(tempSenList)):
        fullSent = ' '.join(tempSenList[i])
        counter = 0
        for word in fullSent.split():
            try:
                comment[i][counter] = wordsList.index(word)
            except Exception:
                comment[i][counter] = 399999
            counter += 1
            if counter == 250:
                break

    return comment


# model section

# define the graph variables
def DefVar(rnnType):
    wordVectors = np.load('./VectorList/wordVectors.npy')

    tf.reset_default_graph()
    # a batch of 24 samples; binary classification, 2 labels

    # 24 reviews, each maxSeqLength tokens long, still represented as word ids
    inputData = tf.placeholder(tf.int32, [batchSize, maxSeqLength])

    data = tf.Variable(tf.zeros([batchSize, maxSeqLength, numDimensions]), dtype=tf.float32)
    # map the id-encoded words to their embedding vectors
    data = tf.nn.embedding_lookup(wordVectors, inputData)

    # lstmUnits sets the hidden size: 64-dim outputs for 50-dim inputs
    lstmCell = tf.contrib.rnn.BasicLSTMCell(lstmUnits)
    if rnnType == 'GRU':
        lstmCell = tf.contrib.rnn.GRUCell(lstmUnits)
    elif rnnType == 'vanilla':
        lstmCell = tf.contrib.rnn.BasicRNNCell(lstmUnits)

    # one hidden step per input token; each step emits a 64-dim vector
    initial_state = lstmCell.zero_state(batchSize, tf.float32)
    value, _ = tf.nn.dynamic_rnn(lstmCell, data, initial_state=initial_state, dtype=tf.float32)
    # the discarded second output holds the final cell state and the final
    # hidden state, each of shape 24x64

    # a random 64x2 projection matrix
    weight = tf.Variable(tf.truncated_normal([lstmUnits, numClasses]))
    # a 2-element bias initialised to [0.1, 0.1]
    bias = tf.Variable(tf.constant(0.1, shape=[numClasses]))

    # value = tf.transpose(value, [1, 0, 2])

    return inputData, value, weight, bias


# define the prediction op
def DefPreFun(value, weight, bias):
    # take the output of the last time step: 24x64
    value = tf.transpose(value, [1, 0, 2])
    last = tf.gather(value, int(value.get_shape()[0]) - 1)

    # project the 64-dim vector onto the two classes and add the bias
    prediction = tf.matmul(last, weight) + bias

    # prediction = tf.argmax(prediction, 1)
    output = tf.nn.softmax(prediction)

    return output

def GetSenList(myinput, model='clause'):
    senList = []
    # predicting a single day only needs the last fifteen values
    tempList = myinput.split()[-15:]

    if model == 'word':
        senList = tempList
    else:
        # group the numbers into clauses of three
        senten = ''
        count = 0
        for number in tempList:
            senten += str(number) + ' '
            count += 1
            if count >= 3:
                senList.append(senten)
                senten = ''
                count = 0

        if senten:
            senList.append(senten)

    return senList

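# For example, GetSenList('1 2 3 4 5 6 7', 'clause') keeps at most the last
# fifteen values and returns ['1 2 3 ', '4 5 6 ', '7 '].
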
class PredictRNN():
    def __init__(self):
        keras.backend.clear_session()
        tf.reset_default_graph()
        # NOTE: savePath is not defined in this file; it must name a saved
        # Keras model in the caller's environment
        self.model1 = keras.models.load_model(savePath)

        data = pd.read_csv("Aamerica.csv")

        date = data["date"].values
        cases = data["comfirmed"].values

        # switch from cumulative counts to daily new cases
        temp1 = []
        for i in range(len(date)):
            date[i] = i
            if i == 0:
                temp1.append(cases[0])
            if i > 0:
                temp1.append(cases[i] - cases[i - 1])

        cases = temp1[1:]

        # carve out the training portion
        date_train = date[0:len(cases) - 25]
        cases_train = cases[0:len(cases) - 25]

        # pack the training data
        cases_train = list(zip(date_train, cases_train))
        train1 = pd.DataFrame(cases_train, columns=['date', 'comfirmed'])
        train1['date'] = train1['date'].astype(float)
        train1['comfirmed'] = train1['comfirmed'].astype(float)

        x_train1 = train1.iloc[:, 1:2].values
        self.scaler1 = MinMaxScaler(feature_range=(0, 1))
        x_train1 = self.scaler1.fit_transform(x_train1)

    def __del__(self):
        # assumes a TensorFlow session was attached elsewhere; nothing in
        # this file creates self.sess
        self.sess.close()

    def Predict(self, test_inputs):
        fullSent = ' '.join(test_inputs)
        test_inputs = fullSent.split()

        test_inputs = np.array(test_inputs)
        test_inputs = test_inputs.reshape(-1, 1)
        test_inputs = self.scaler1.transform(test_inputs)

        test_features = []
        test_features.append(test_inputs[-15:])
        test_features = np.array(test_features)
        # test_features = np.reshape(test_features, (test_features.shape[0], test_features.shape[1], 1))
        prediction = self.model1.predict(test_features)
        prediction = self.scaler1.inverse_transform(prediction)

        return prediction[0][0]

    def GetRes(self, reorder_inputs):
        predictions = []
        for order in reorder_inputs:
            orderInput = np.array(order)
            orderInput = orderInput.reshape(-1, 1)
            orderInput = self.scaler1.transform(orderInput)

            test_features = []
            test_features.append(orderInput[-15:])
            test_features = np.array(test_features)
            # test_features = np.reshape(test_features, (test_features.shape[0], test_features.shape[1], 1))
            print('test_features', test_features)

            prediction = self.model1.predict(test_features)
            prediction = self.scaler1.inverse_transform(prediction)
            predictions.append(prediction[0][0])

        return predictions

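# A minimal usage sketch, assuming savePath names a saved Keras model and
# Aamerica.csv sits next to this file (both come from outside this module):
#
# model = PredictRNN()
# next_day = model.Predict('1 2 3 4 5 6 7 8 9 10 11 12 13 14 15'.split())
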
class ForPredictCovid():
    def __init__(self, myinput, modelType, judgeType):
        # the input split into clauses
        self.senList = GetSenList(myinput, 'clause')
        # the input split into words
        self.wordList = GetSenList(myinput, 'word')
        # the list chosen by the clause/word model switch
        self.chosenList = self.senList
        # length of the chosen list
        self.sentenSize = len(self.chosenList)
        if modelType == 'word':
            self.chosenList = self.wordList

        self.judgeType = judgeType
        # self.rnnType = rnnType
        # the prepared RNN
        self.PreRNN = PredictRNN()
        # prediction for the original sequence
        self.oriRes = self.PreRNN.Predict(self.chosenList)

    # accepts list input only
    def Predict(self, tempSenList, rnnType):
        res = self.PreRNN.Predict(tempSenList)
        return res

def PredictMulti(tempSenList, rnnType):
    inputData, value, weight, bias = DefVar(rnnType)
    predicr = DefPreFun(value, weight, bias)
    sess = tf.InteractiveSession()
    saver = tf.train.Saver()

    if rnnType == 'GRU':
        saver.restore(sess, './modelsMoreGRU/pretrained_gru.ckpt-130000')
    elif rnnType == 'vanilla':
        saver.restore(sess, './modelsMoreVanilla/pretrained_gru.ckpt-500000')
    else:
        saver.restore(sess, './modelsMoreMid/pretrained_lstm.ckpt-290000')

    comment = ListToVecCommentMulti(tempSenList)
    res = sess.run(predicr, {inputData: comment})
    return res

def ListToVecCommentBySomeSub(tempSenList, loc):
    wordsList = np.load('./VectorList/wordsList.npy')
    wordsList = wordsList.tolist()  # originally loaded as a numpy array
    wordsList = [word.decode('UTF-8') for word in wordsList]  # decode the words as UTF-8

    # `comment` stores the reviews with each word replaced by its dictionary index
    comment = np.zeros([batchSize, maxSeqLength])
    listSize = len(tempSenList)

    for i in range(batchSize):
        if loc + i >= listSize:
            break

        counter = 0
        for word in tempSenList[loc + i].split():
            try:
                comment[i][counter] = wordsList.index(word)
            except Exception:
                comment[i][counter] = 399999
            counter += 1
            if counter == 250:
                break

    return comment

def GetDeatail(tempSenList, rnnType):
    inputData, value, weight, bias = DefVar(rnnType)
    predicr = DefPreFun(value, weight, bias)
    sess = tf.InteractiveSession()
    saver = tf.train.Saver()

    if rnnType == 'GRU':
        saver.restore(sess, './modelsMoreGRU/pretrained_gru.ckpt-130000')
    elif rnnType == 'vanilla':
        saver.restore(sess, './modelsMoreVanilla/pretrained_gru.ckpt-500000')
    else:
        saver.restore(sess, './modelsMoreMid/pretrained_lstm.ckpt-290000')

    subSenRes = []
    listSize = len(tempSenList)
    loc = 0
    while True:
        comment = ListToVecCommentBySomeSub(tempSenList, loc)
        res = sess.run(predicr, {inputData: comment})

        for i in range(batchSize):
            subSenRes.append(res[i].tolist())
            loc += 1
            if loc == listSize:
                break

        if loc == listSize:
            break

    return subSenRes

@ -0,0 +1,37 @@
# This file is a helper for loading data files; each dataset format gets
# its own reading logic.
# Anyone who wants to use our interface can adapt this file to their data.

import os
from typing import Counter
from TrainDataPretreat import Rrtreat

dirpath = './data/names'

trainData = []
from RNNModelPredict import aRNNModel
PreRNN = aRNNModel()

for files in os.listdir(dirpath):
    if not files.endswith('.txt'):
        continue
    print(files)
    for line in open(dirpath + '/' + files, encoding='utf8'):
        theData = ''
        counter = 0
        line = line.strip('\n')
        # group every two characters into a ';'-terminated clause
        for word in line:
            theData = theData + word + ' '
            counter += 1
            if counter == 2:
                theData = theData + ';'
                counter = 0

        if not theData.endswith(';'):
            theData = theData + ';'

        trainData.append(theData)

print(trainData)
print(len(trainData))
Rrtreat(trainData, PreRNN)

@ -0,0 +1,821 @@
import numpy as np
import tensorflow as tf
import re
import copy
from random import shuffle
import math
# wordsList = np.load('./VectorList/wordsList.npy')
# wordsList = wordsList.tolist()  # originally loaded as a numpy array
# wordsList = [word.decode('UTF-8') for word in wordsList]  # decode the words as UTF-8

# helper functions

# split a whole review into clauses and return them as a list
def InputToSenList(senten, model):
    mark = ' mark! '
    # the regular expression decides where to split
    stripSpecialChars = re.compile("[^A-Za-z0-9 ]+")
    # lower-case everything
    senten = senten.lower().replace('<br />', '')
    # replace all punctuation with the mark token
    subSenList = []

    if model == 'clause':
        myinput = re.sub(stripSpecialChars, mark, senten)
        # wordVec holds the tokens, i.e. the words
        wordVec = myinput.split()

        # markLoc records the mark (punctuation) positions used as clause boundaries
        markLoc = []
        markLoc.append(0)

        shiftNum = 0
        for i in range(len(wordVec)):
            if wordVec[i - shiftNum] == 'mark!':
                markLoc.append(i - shiftNum)
                wordVec.pop(i - shiftNum)
                shiftNum += 1

        # split on the punctuation positions and collect each clause
        for i in range(len(markLoc) - 1):
            subSenList.append(" ".join(wordVec[markLoc[i]:markLoc[i + 1]]))
    else:
        myinput = re.sub(stripSpecialChars, ' ', senten)
        # here the list simply holds the tokens
        subSenList = myinput.split()

    return subSenList

# convert a clause list into a comment matrix of word-embedding indices
def ListToVecComment(tempSenList):
    # assumes wordsList was loaded at module level (see the commented lines above)
    global wordsList

    # `comment` stores the review with each word replaced by its dictionary index
    comment = np.zeros([batchSize, maxSeqLength])

    fullSent = ' '.join(tempSenList)
    counter = 0
    for word in fullSent.split():
        try:
            comment[0][counter] = wordsList.index(word)
        except Exception:
            comment[0][counter] = 399999
        counter += 1
        if counter == 250:
            break

    return comment

def CountWordNum(strs):
    count = 1
    for word in strs:
        if word == ' ':
            count += 1
    return count

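# For example, CountWordNum('a b c') returns 3: one more word than spaces.
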
def GetSenList(myinput, model='clause'):
    myinput += '. '
    # split into clauses (InputToSenList requires the model argument, which
    # the original call omitted)
    senList = InputToSenList(myinput, 'clause')

    # for word-level splitting, rejoin the clauses and split on whitespace
    if model == 'word':
        fullSent = ' '.join(senList)
        senList = fullSent.split()

    return senList

maxSeqLength = 250
batchSize = 24
lstmUnits = 64
numClasses = 2
numDimensions = 50  # dimensions of each word vector
iterations = 1000  # 100000
learnRate = 0

# helper functions
from random import randint
import keras

class ReorderByGroup():
    # computes the local and global interpretability values and extracts
    # order lines from the local importance
    def __init__(self, myinput, RNNModel):
        # the input split into clauses
        self.senList = InputToSenList(myinput, 'clause')
        print('senlist', self.senList)
        # the input split into words
        self.wordList = InputToSenList(myinput, 'word')
        print('wordList', self.wordList)
        # the list chosen by the clause/word switch; defaults to clause level
        # (the original left this unset, but ChanCompo below relies on it)
        self.chosenList = self.senList
        # length of the clause list
        self.sentenSize = len(self.senList)

        # the prepared RNN; the caller's model must expose a Predict method
        # self.PreRNN = PredictRNN()
        self.PreRNN = RNNModel
        # prediction for the original sequence
        self.oriRes = self.PreRNN.Predict(' '.join(self.senList))
        # iteration budget; the suggested value is the clause count times
        # the number of full passes
        self.iterations = 100

    def ChanCompo(self, loc):
        # swap the clause at `loc` with a random other clause
        global wordsList

        # `comment` stores the reviews with each word replaced by its dictionary index
        comment = np.zeros([batchSize, maxSeqLength])

        for i in range(batchSize):
            counter = 0

            # deep copy: a shallow copy would share the underlying list
            subSenList = copy.deepcopy(self.chosenList)
            subSenSize = len(subSenList) - 1

            smallSenNum = loc
            # pick a random position and swap its content with `loc`
            bigSenNum = randint(0, subSenSize)

            try:
                temp = subSenList[smallSenNum]
                subSenList[smallSenNum] = subSenList[bigSenNum]
                subSenList[bigSenNum] = temp
            except:
                print(smallSenNum, ' !!! ', subSenList[smallSenNum])

            loc += 1
            if loc > subSenSize:
                loc = 0

            # after reordering, rejoin the clauses and split into words
            fullSent = ' '.join(subSenList)

            for word in fullSent.split():
                try:
                    comment[i][counter] = wordsList.index(word)
                except Exception:
                    comment[i][counter] = 399999
                counter += 1
                if counter == 250:
                    break

        return comment

    def ChanCompoInTokenRandom(self, loc):
        # shuffle the inside of one clause to measure that clause's importance
        global wordsList

        # `comment` stores the reviews with each word replaced by its dictionary index
        comment = np.zeros([batchSize, maxSeqLength])

        for i in range(batchSize):
            counter = 0

            subSenList = copy.deepcopy(self.chosenList)
            subSenSize = len(subSenList) - 1

            # select the clause and destroy its internal order
            subSenInLoc = subSenList[loc]
            senInLocList = subSenInLoc.split()
            shuffle(senInLocList)
            subSenList[loc] = ' '.join(senInLocList)

            loc += 1
            if loc > subSenSize:
                loc = 0

            fullSent = ' '.join(subSenList)
            for word in fullSent.split():
                try:
                    comment[i][counter] = wordsList.index(word)
                except Exception:
                    comment[i][counter] = 399999
                counter += 1
                if counter == 250:
                    break

        return comment

    def ChanInpSubByOne(self, subNumCount):
        subSenList = []

        smallSenNum = subNumCount
        bigSenNum = randint(0, self.sentenSize - 1)

        if smallSenNum > bigSenNum:
            temp = smallSenNum
            smallSenNum = bigSenNum
            bigSenNum = temp

        # move the clause at smallSenNum to position bigSenNum and shift
        # the clauses in between one step forward
        for j in range(self.sentenSize):
            if j == bigSenNum:
                subSenList.append(self.senList[smallSenNum])
            elif j >= smallSenNum and j < bigSenNum:
                subSenList.append(self.senList[j + 1])
            else:
                subSenList.append(self.senList[j])

        fullSent = ' '.join(subSenList)
        return fullSent

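    # For example, with senList ['a', 'b', 'c', 'd'], moving clause 0 to the
    # randomly drawn position 2 yields 'b c a d'.
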
    def ChanTokenInDefinitedPart(self, wodNumCount, tokenLocInSub):
        subSenList = copy.deepcopy(self.senList)

        # locate the clause and the in-clause position of the target word
        senLoc = tokenLocInSub[wodNumCount][0]
        wordLoc = tokenLocInSub[wodNumCount][1]

        subSenInLoc = subSenList[senLoc]
        senInLocList = subSenInLoc.split()

        # swap the target word with a random word of the same clause
        ranWordLoc = randint(0, len(senInLocList) - 1)
        temp = senInLocList[wordLoc]
        senInLocList[wordLoc] = senInLocList[ranWordLoc]
        senInLocList[ranWordLoc] = temp

        subSenList[senLoc] = ' '.join(senInLocList)
        fullSent = ' '.join(subSenList)
        return fullSent

    def GetTokenImportance(self):
        # compute each word's local importance, then average the word
        # importances inside each clause to get the clause's local importance
        wordSize = len(self.wordList)

        # local importance per clause
        senDifferCount = np.zeros(self.sentenSize, dtype=float)
        # local importance per word
        tokenDifferCount = np.zeros(wordSize, dtype=float)
        # position of every word inside its clause
        tokenLocInSub = np.zeros([wordSize, 2], dtype=int)

        wordLoc = 0
        # record which clause each word belongs to and its position there
        for i in range(len(self.senList)):
            for j in range(len(self.senList[i].split())):
                tokenLocInSub[wordLoc][0] = i
                tokenLocInSub[wordLoc][1] = j
                wordLoc += 1

        iterations = self.iterations * 4
        wodNumCount = 0
        calTimes = 0
        counter = 0

        while True:
            # measure the local importance of word number wodNumCount
            comment = self.ChanTokenInDefinitedPart(wodNumCount, tokenLocInSub)
            res = self.PreRNN.Predict(comment)

            calDif = 0
            for m in range(len(res)):
                calDif += abs(res[m] - self.oriRes[m])
            calDif /= len(res)

            tokenDifferCount[wodNumCount] += calDif
            wodNumCount += 1
            counter += 1

            if wodNumCount >= wordSize:
                wodNumCount = 0
                calTimes += 1
            if counter > iterations:
                break

        # average to get the word-level local importance
        tokenDifferCount = tokenDifferCount / calTimes

        # sum the local importance of the words inside clause i
        wordLoc = 0
        for i in range(len(self.senList)):
            for j in range(len(self.senList[i].split())):
                senDifferCount[i] += tokenDifferCount[wordLoc]
                wordLoc += 1

            # divide the clause total by its word count
            senDifferCount[i] /= len(self.senList[i].split())

        return senDifferCount, tokenDifferCount, tokenLocInSub

    # measure the effect of reordering on the output (global importance)
    def ReorderByOutGlobal(self):
        # record the difference between each reordered result and the original
        differCount = np.zeros(self.sentenSize, dtype=float)

        subNumCount = 0  # which clause is being processed
        iterations = self.iterations

        calTimes = 0
        counter = 0
        while True:
            comment = self.ChanInpSubByOne(subNumCount)
            res = self.PreRNN.Predict(comment)

            calDif = 0
            print('res', res)
            print('oriRes', self.oriRes)
            for m in range(len(res)):
                calDif += abs(res[m] - self.oriRes[m])
            calDif /= len(res)
            print('differcount', differCount[subNumCount])
            print('caldif', calDif)

            differCount[subNumCount] += calDif
            subNumCount += 1
            counter += 1

            if subNumCount >= self.sentenSize:
                subNumCount = 0
                calTimes += 1
            if counter > iterations:
                break

        differCount = differCount / calTimes
        return differCount

    def CalColor(self, percent, color):
        # interpolate between a neutral gray and the sentiment color
        theColor = "#"
        gray = 'c4d7d6'
        blue = 'baccd9'
        red = 'eeb8c3'
        print('colorPercent', percent)
        if color == 'blue':
            # the sentiment color is blue
            for i in range(3):
                # read the blue and gray channel values as hex and convert to decimal
                blueR = int(blue[i * 2:i * 2 + 2], 16)
                grayR = int(gray[i * 2:i * 2 + 2], 16)
                # scale the channel difference by the percentage and add the
                # gray baseline, i.e. how far the color leans from gray to blue
                R = int((blueR - grayR) * percent) + grayR
                theColor += hex(R)[2:].zfill(2)
        elif color == 'red':
            for i in range(3):
                redR = int(red[i * 2:i * 2 + 2], 16)
                grayR = int(gray[i * 2:i * 2 + 2], 16)
                R = int((redR - grayR) * percent) + grayR
                theColor += hex(R)[2:].zfill(2)

        return theColor

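    # For example, CalColor(1.0, 'blue') returns the full blue '#baccd9' and
    # CalColor(0.0, 'blue') the neutral gray '#c4d7d6'; intermediate
    # percentages blend the two channel by channel.
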
    def GetImportanceByColor(self):
        # compute the global importance
        differCount = self.ReorderByOutGlobal()
        differCount = differCount.tolist()

        # find the extreme values
        maxProb = np.max(differCount)
        minProb = np.min(differCount)

        colorCount = []
        for i in range(len(differCount)):
            # the sign decides the sentiment direction; the magnitude relative
            # to the extreme value gives the blend percentage
            if differCount[i] > 0:
                colorCount.append(self.CalColor(differCount[i] / maxProb, 'blue'))
            elif differCount[i] < 0:
                colorCount.append(self.CalColor(differCount[i] / minProb, 'red'))
            else:
                colorCount.append('#b2b9b4')

        self.colorCount = colorCount
        self.differCount = differCount

        print(colorCount)
        globalDataZip = {'differCount': differCount, 'colorCount': colorCount}

        # compute the local importance
        senDifferCount, tokenDifferCount, tokenLocInSub = self.GetTokenImportance()

        senDifferCount = senDifferCount.tolist()
        localColorCount = []

        maxProb = np.max(senDifferCount)
        minProb = np.min(senDifferCount)

        for i in range(len(senDifferCount)):
            if senDifferCount[i] > 0:
                localColorCount.append(self.CalColor(senDifferCount[i] / maxProb, 'blue'))
            elif senDifferCount[i] < 0:
                localColorCount.append(self.CalColor(senDifferCount[i] / minProb, 'red'))
            else:
                localColorCount.append('#b2b9b4')

        tokenColorZip, tokenDifferCountZip = self.GetTokenColorZip(tokenDifferCount, tokenLocInSub)

        localDataZip = {'localColorCount': localColorCount, 'senDifferCount': senDifferCount,
                        'tokenColorZip': tokenColorZip, 'tokenDifferCountZip': tokenDifferCountZip}

        # the threshold is half of the largest absolute value
        threshold = abs(maxProb) if abs(maxProb) > abs(minProb) else abs(minProb)
        threshold *= 0.5
        # extract the order lines with that threshold
        localOrderLine, lineDiffer = self.GetLocalOrderLine(senDifferCount, threshold)
        print('localOrderLine', localOrderLine)
        print('lineDiffer', lineDiffer)

        lineDifferColor = []
        maxProb = np.max(lineDiffer)
        minProb = np.min(lineDiffer)
        for i in range(len(lineDiffer)):
            if lineDiffer[i] > 0:
                lineDifferColor.append(self.CalColor(lineDiffer[i] / maxProb, 'blue'))
            elif lineDiffer[i] < 0:
                lineDifferColor.append(self.CalColor(lineDiffer[i] / minProb, 'red'))
            else:
                lineDifferColor.append('#b2b9b4')

        orderLineZip = {'localOrderLine': localOrderLine, 'lineDiffer': lineDiffer,
                        'lineDifferColor': lineDifferColor}

        return globalDataZip, localDataZip, orderLineZip

    def ShufflePatter(self, start, end):
        subSenList = copy.deepcopy(self.senList)

        # join the clauses from start to end and shuffle their words
        subSenInLoc = ' '.join(subSenList[start:end + 1])
        senInLocList = subSenInLoc.split()
        shuffle(senInLocList)

        fullSent = ' '.join(subSenList[:start]) + ' ' + ' '.join(senInLocList) + ' ' + ' '.join(subSenList[end + 1:])
        return fullSent

    def GetLocalOrderLine(self, differCount, threshold):
        # extract order lines from the importances and the threshold
        critLoc = []
        orderLine = []
        lineDiffer = []

        for i in range(len(differCount)):
            if abs(differCount[i]) > threshold:
                critLoc.append(i)

        print(critLoc)
        for loc in range(len(critLoc)):
            # take one critical clause
            index = critLoc[loc]
            front = -1
            if loc > 0:
                front = critLoc[loc - 1]

            back = self.sentenSize
            if loc < len(critLoc) - 1:
                back = critLoc[loc + 1]

            start = index
            end = index

            # average the gap over ten shuffles of the critical clause
            oneGap = 0
            for i in range(10):
                comment = self.ShufflePatter(index, index)
                res = self.PreRNN.Predict(comment)
                calDif = 0
                for m in range(len(res)):
                    calDif += abs(res[m] - self.oriRes[m])
                calDif /= len(res)
                oneGap += calDif
            oneGap = oneGap / 10

            theGap = oneGap
            # grow the line towards the front while the gap holds up
            for froSen in range(index - 1, front, -1):
                resGap = 0
                for i in range(10):
                    # note: the shuffled span is not widened here
                    comment = self.ShufflePatter(index, index)
                    res = self.PreRNN.Predict(comment)
                    calDif = 0
                    for m in range(len(res)):
                        calDif += abs(res[m] - self.oriRes[m])
                    calDif /= len(res)
                    resGap += calDif
                resGap = resGap / 10

                if resGap < abs(oneGap):
                    break
                start = froSen

            # grow the line towards the back the same way
            for backSen in range(index + 1, back):
                resGap = 0
                for i in range(10):
                    comment = self.ShufflePatter(index, index)
                    res = self.PreRNN.Predict(comment)
                    calDif = 0
                    for m in range(len(res)):
                        calDif += abs(res[m] - self.oriRes[m])
                    calDif /= len(res)
                    resGap += calDif
                resGap = resGap / 10

                if resGap < abs(oneGap):
                    break
                end = backSen
                theGap = resGap

            theList = []
            for i in range(start, end + 1):
                theList.append(i)
            orderLine.append(theList)
            lineDiffer.append(theGap)

        print(orderLine)
        return orderLine, lineDiffer

    def GetTokenColorZip(self, tokenDifferCount, tokenLocInSub):
        tokenColorZip = []
        differCountZip = []

        for i in range(self.sentenSize):
            # collect the word importances belonging to clause i
            differCount = []
            for j in range(len(tokenLocInSub)):
                if tokenLocInSub[j][0] == i:
                    differCount.append(tokenDifferCount[j])

            tokenColor = []
            maxProb = np.max(differCount)
            minProb = np.min(differCount)

            for k in range(len(differCount)):
                if differCount[k] > 0:
                    tokenColor.append(self.CalColor(differCount[k] / maxProb, 'blue'))
                elif differCount[k] < 0:
                    tokenColor.append(self.CalColor(differCount[k] / minProb, 'red'))
                else:
                    tokenColor.append('#b2b9b4')

            tokenColorZip.append(tokenColor)
            differCountZip.append(differCount)

        return tokenColorZip, differCountZip

    def DealDataZip(self, DataZip):
        oriSenListZip = []
        colorCountZip = []
        differCountZip = []

        for data in DataZip:
            # split each input into clauses for the front end
            oriSenList = InputToSenList(data, 'clause')
            oriSenListZip.append(oriSenList)
            # and hand it to the model for prediction

            # NOTE: GetImportanceByColor now returns three zipped dicts;
            # this helper predates that change and is kept for reference
            colorCount, differCount = self.GetImportanceByColor()
            colorCountZip.append(colorCount)
            differCount = differCount.tolist()
            differCountZip.append(differCount)

        return oriSenListZip, colorCountZip, differCountZip

    def ListToVecCommentBySomeSub(self, tempSenList, loc):
        global wordsList

        # `comment` stores the reviews with each word replaced by its dictionary index
        comment = np.zeros([batchSize, maxSeqLength])
        listSize = len(tempSenList)

        for i in range(batchSize):
            if loc + i >= listSize:
                break

            counter = 0
            for word in tempSenList[loc + i].split():
                try:
                    comment[i][counter] = wordsList.index(word)
                except Exception:
                    comment[i][counter] = 399999
                counter += 1
                if counter == 250:
                    break

        return comment

    def GetDeatail(self):
        subSenRes = []

        loc = 0
        while True:
            comment = self.ListToVecCommentBySomeSub(self.senList, loc)
            res = self.PreRNN.GetRes(comment)
            # res = sess.run(predicr, {inputData: comment})

            for i in range(batchSize):
                subSenRes.append(res[i].tolist())
                loc += 1
                if loc == self.sentenSize:
                    break

            if loc == self.sentenSize:
                break

        return subSenRes

if __name__ == "__main__":
    myinput = "This is the worst movie ever made. Ever. It beats everything. I have never seen worse. Retire the trophy and give it to these people.....there's just no comparison.<br /><br />Even three days after watching this (for some reason I still don't know why) I cannot believe how insanely horrific this movie is/was. Its so bad. So far from anything that could be considered a movie, a story or anything that should have ever been created and brought into our existence.<br /><br />This made me question whether or not humans are truly put on this earth to do good. It made me feel disgusted with ourselves and our progress as a species in this universe. This type of movie sincerely hurts us as a society."
    # myinput = input("input: ")

    # the original called an undefined InputToSenListToReplace helper; the
    # module's own splitter is used here instead
    oriSenList = InputToSenList(myinput, 'clause')
    print(oriSenList)

@ -0,0 +1,646 @@
import pandas as pd
import numpy as np
import tensorflow as tf
import keras
from sklearn.preprocessing import MinMaxScaler
import re
import copy
from random import shuffle
import math

# helper functions

# split a whole review into clauses and return them as a list

# myinput must be a string
def InputToSenList(senten, model):
    mark = ' mark! '
    # the regular expression decides where to split
    stripSpecialChars = re.compile("[^A-Za-z0-9 ]+")
    # lower-case everything
    senten = senten.lower().replace('<br />', '')
    # replace all punctuation with the mark token
    subSenList = []

    if model == 'clause':
        myinput = re.sub(stripSpecialChars, mark, senten)
        # wordVec holds the tokens, i.e. the words
        wordVec = myinput.split()

        # markLoc records the mark (punctuation) positions used as clause boundaries
        markLoc = []
        markLoc.append(0)

        shiftNum = 0
        for i in range(len(wordVec)):
            if wordVec[i - shiftNum] == 'mark!':
                markLoc.append(i - shiftNum)
                wordVec.pop(i - shiftNum)
                shiftNum += 1

        # split on the punctuation positions and collect each clause
        for i in range(len(markLoc) - 1):
            subSenList.append(" ".join(wordVec[markLoc[i]:markLoc[i + 1]]))
    else:
        myinput = re.sub(stripSpecialChars, ' ', senten)
        # here the list simply holds the tokens
        subSenList = myinput.split()

    return subSenList

# def GetSenList(myinput, model='clause'):
#     senList = []
#     # split the input string
#     tempList = myinput.split()
#
#     if model == 'word':
#         senList = tempList
#     else:
#         senten = ''
#         count = 0
#         for number in tempList:
#             senten += str(number) + ' '
#             count += 1
#             if count >= 3:
#                 senList.append(senten)
#                 senten = ''
#                 count = 0
#
#         if senten:
#             senList.append(senten)
#
#     return senList


# helper functions
from random import randint

class ReorderByGroupCovid():
    def __init__(self, myinput, RNNModel):
        # the input split into clauses
        self.senList = InputToSenList(myinput, 'clause')
        # the input split into words
        self.wordList = InputToSenList(myinput, 'word')

        # length of the clause list
        self.sentenSize = len(self.senList)

        print('senlist', self.senList)
        # the prepared RNN; the caller's model must expose a Predict method
        # self.PreRNN = PredictRNN()
        self.PreRNN = RNNModel
        # prediction for the original sequence
        self.oriRes = self.PreRNN.Predict(' '.join(self.senList))
        # iteration budget; the suggested value is the clause count times
        # the number of full passes
        self.iterations = 100

    # measure the effect of reordering on the output (global importance)
    def ReorderByOutGlobal(self):
        # record the difference between each reordered result and the original
        differCount = np.zeros(self.sentenSize, dtype=float)

        subNumCount = 0  # which clause is being processed
        iterations = self.iterations

        calTimes = 0
        counter = 0
        while True:
            comment = self.ChanInpSubByOne(subNumCount)
            res = self.PreRNN.Predict(comment)

            differCount[subNumCount] += (res - self.oriRes)
            subNumCount += 1
            counter += 1

            if subNumCount >= self.sentenSize:
                subNumCount = 0
                calTimes += 1
            if counter > iterations:
                break

        differCount = differCount / calTimes
        return differCount

    def ChanInpSubByOne(self, subNumCount):
        subSenList = []

        smallSenNum = subNumCount
        bigSenNum = randint(0, self.sentenSize - 1)

        if smallSenNum > bigSenNum:
            temp = smallSenNum
            smallSenNum = bigSenNum
            bigSenNum = temp

        # move the clause at smallSenNum to position bigSenNum and shift
        # the clauses in between one step forward
        for j in range(self.sentenSize):
            if j == bigSenNum:
                subSenList.append(self.senList[smallSenNum])
            elif j >= smallSenNum and j < bigSenNum:
                subSenList.append(self.senList[j + 1])
            else:
                subSenList.append(self.senList[j])

        fullSent = ' '.join(subSenList)
        return fullSent

    def ChanTokenInDefinitedPart(self, wodNumCount, tokenLocInSub):
        subSenList = copy.deepcopy(self.senList)

        # locate the clause and the in-clause position of the target word
        senLoc = tokenLocInSub[wodNumCount][0]
        wordLoc = tokenLocInSub[wodNumCount][1]

        subSenInLoc = subSenList[senLoc]
        senInLocList = subSenInLoc.split()

        # swap the target word with a random word of the same clause
        ranWordLoc = randint(0, len(senInLocList) - 1)
        temp = senInLocList[wordLoc]
        senInLocList[wordLoc] = senInLocList[ranWordLoc]
        senInLocList[ranWordLoc] = temp

        subSenList[senLoc] = ' '.join(senInLocList)
        fullSent = ' '.join(subSenList)
        return fullSent

    def GetTokenImportance(self):
        wordSize = len(self.wordList)

        senDifferCount = np.zeros(self.sentenSize, dtype=float)
        tokenDifferCount = np.zeros(wordSize, dtype=float)
        tokenLocInSub = np.zeros([wordSize, 2], dtype=int)

        wordLoc = 0
        for i in range(len(self.senList)):
            for j in range(len(self.senList[i].split())):
                tokenLocInSub[wordLoc][0] = i
                tokenLocInSub[wordLoc][1] = j
                wordLoc += 1

        iterations = self.iterations * 4
        wodNumCount = 0
        calTimes = 0
        counter = 0

        while True:
            comment = self.ChanTokenInDefinitedPart(wodNumCount, tokenLocInSub)
            res = self.PreRNN.Predict(comment)

            tokenDifferCount[wodNumCount] += (res - self.oriRes)
            wodNumCount += 1
            counter += 1

            if wodNumCount >= wordSize:
                wodNumCount = 0
                calTimes += 1
            if counter > iterations:
                break

        tokenDifferCount = tokenDifferCount / calTimes

        wordLoc = 0
        for i in range(len(self.senList)):
            for j in range(len(self.senList[i].split())):
                senDifferCount[i] += tokenDifferCount[wordLoc]
                wordLoc += 1

            senDifferCount[i] /= len(self.senList[i].split())

        return senDifferCount, tokenDifferCount, tokenLocInSub

    def CalColor(self, percent, color):
        theColor = "#"
        gray = 'c4d7d6'
        blue = 'baccd9'
        red = 'eeb8c3'
        print('colorPercent', percent)
        if color == 'blue':
            for i in range(3):
                blueR = int(blue[i * 2:i * 2 + 2], 16)
                grayR = int(gray[i * 2:i * 2 + 2], 16)
                R = int((blueR - grayR) * percent) + grayR
                theColor += hex(R)[2:].zfill(2)
        elif color == 'red':
            for i in range(3):
                redR = int(red[i * 2:i * 2 + 2], 16)
                grayR = int(gray[i * 2:i * 2 + 2], 16)
                R = int((redR - grayR) * percent) + grayR
                theColor += hex(R)[2:].zfill(2)

        return theColor

    def GetImportanceByColor(self):
        # compute the global importance
        differCount = self.ReorderByOutGlobal()
        differCount = differCount.tolist()

        maxProb = np.max(differCount)
        minProb = np.min(differCount)
        # normalise by the largest absolute value
        maxMmin = abs(maxProb) if abs(maxProb) > abs(minProb) else abs(minProb)
        if maxMmin == 0:
            maxMmin = 1

        colorCount = []
        for i in range(len(differCount)):
            differCount[i] = differCount[i] / maxMmin
            if differCount[i] > 0:
                colorCount.append(self.CalColor(differCount[i], 'blue'))
            elif differCount[i] < 0:
                colorCount.append(self.CalColor(-differCount[i], 'red'))
            else:
                colorCount.append('#b2b9b4')

        self.colorCount = colorCount
        self.differCount = differCount

        globalDataZip = {'differCount': differCount, 'colorCount': colorCount}

        # compute the local importance
        senDifferCount, tokenDifferCount, tokenLocInSub = self.GetTokenImportance()

        senDifferCount = senDifferCount.tolist()
        localColorCount = []

        maxProb = np.max(tokenDifferCount)
        minProb = np.min(tokenDifferCount)
        maxMmin = abs(maxProb) if abs(maxProb) > abs(minProb) else abs(minProb)
        if maxMmin == 0:
            maxMmin = 1

        for i in range(len(tokenDifferCount)):
            tokenDifferCount[i] = tokenDifferCount[i] / maxMmin

        maxProb = np.max(senDifferCount)
        minProb = np.min(senDifferCount)
        maxMmin = abs(maxProb) if abs(maxProb) > abs(minProb) else abs(minProb)
        if maxMmin == 0:
            maxMmin = 1

        for i in range(len(senDifferCount)):
            senDifferCount[i] = senDifferCount[i] / maxMmin
            if senDifferCount[i] > 0:
                localColorCount.append(self.CalColor(senDifferCount[i], 'blue'))
            elif senDifferCount[i] < 0:
                localColorCount.append(self.CalColor(-senDifferCount[i], 'red'))
            else:
                localColorCount.append('#b2b9b4')

        tokenColorZip, tokenDifferCountZip = self.GetTokenColorZip(tokenDifferCount, tokenLocInSub)

        localDataZip = {'localColorCount': localColorCount, 'senDifferCount': senDifferCount,
                        'tokenColorZip': tokenColorZip, 'tokenDifferCountZip': tokenDifferCountZip}

        maxProb = np.max(senDifferCount)
        minProb = np.min(senDifferCount)
        threshold = abs(maxProb) if abs(maxProb) > abs(minProb) else abs(minProb)
        threshold *= 0.8
        localOrderLine, lineDiffer = self.GetLocalOrderLine(senDifferCount, threshold)

        print('lineDiffer', lineDiffer)

        lineDifferColor = []
        maxProb = np.max(lineDiffer)
        minProb = np.min(lineDiffer)
        # the original divided by the stale clause-level maxMmin here; the
        # extreme of lineDiffer itself is the intended scale
        maxMmin = abs(maxProb) if abs(maxProb) > abs(minProb) else abs(minProb)
        if maxMmin == 0:
            maxMmin = 1
        for i in range(len(lineDiffer)):
            lineDiffer[i] = lineDiffer[i] / maxMmin
            if lineDiffer[i] > 0:
                lineDifferColor.append(self.CalColor(lineDiffer[i], 'blue'))
            elif lineDiffer[i] < 0:
                lineDifferColor.append(self.CalColor(-lineDiffer[i], 'red'))
            else:
                lineDifferColor.append('#b2b9b4')

        orderLineZip = {'localOrderLine': localOrderLine, 'lineDiffer': lineDiffer,
                        'lineDifferColor': lineDifferColor}

        return globalDataZip, localDataZip, orderLineZip

    def ShufflePatter(self, start, end):
        subSenList = copy.deepcopy(self.senList)

        # join the clauses from start to end and shuffle their words
        subSenInLoc = ' '.join(subSenList[start:end + 1])
        senInLocList = subSenInLoc.split()
        shuffle(senInLocList)

        fullSent = ' '.join(subSenList[:start]) + ' ' + ' '.join(senInLocList) + ' ' + ' '.join(subSenList[end + 1:])
        return fullSent

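    # For example, ShufflePatter(1, 2) on senList ['a b', 'c d', 'e f', 'g h']
    # shuffles the four words of 'c d e f' in place and leaves 'a b' and
    # 'g h' untouched.
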
    def GetLocalOrderLine(self, differCount, threshold):
        critLoc = []
        orderLine = []
        lineDiffer = []
        for i in range(len(differCount)):
            if abs(differCount[i]) > threshold:
                critLoc.append(i)

        print(critLoc)
        for loc in range(len(critLoc)):
            index = critLoc[loc]
            front = -1
            if loc > 0:
                front = critLoc[loc - 1]

            back = self.sentenSize
            if loc < len(critLoc) - 1:
                back = critLoc[loc + 1]

            start = index
            end = index

            # average the prediction over ten shuffles of the critical clause
            # (accumulating from 0.0 avoids the falsy-value trap of the
            # original `if oneRes:` pattern)
            oneRes = 0.0
            for i in range(10):
                comment = self.ShufflePatter(index, index)
                oneRes += self.PreRNN.Predict(comment)
            oneRes = oneRes / 10

            print('oneRes', oneRes)

            theRes = oneRes
            # grow the line towards the front while the effect holds up
            for froSen in range(index - 1, front, -1):
                res = 0.0
                for i in range(10):
                    # note: the shuffled span is not widened here
                    comment = self.ShufflePatter(index, index)
                    res += self.PreRNN.Predict(comment)
                res = res / 10

                if abs(res - self.oriRes) < abs(oneRes - self.oriRes):
                    break
                start = froSen

            # grow the line towards the back the same way
            for backSen in range(index + 1, back):
                res = 0.0
                for i in range(10):
                    comment = self.ShufflePatter(index, index)
                    res += self.PreRNN.Predict(comment)
                res = res / 10
                if abs(res - self.oriRes) < abs(oneRes - self.oriRes):
                    break
                end = backSen
                theRes = res

            theList = []
            for i in range(start, end + 1):
                theList.append(i)
            orderLine.append(theList)
            lineDiffer.append(theRes - self.oriRes)

        print(orderLine)
        return orderLine, lineDiffer

    def GetTokenColorZip(self, tokenDifferCount, tokenLocInSub):
        tokenColorZip = []
        differCountZip = []

        for i in range(self.sentenSize):
            # collect the word importances belonging to clause i
            differCount = []
            for j in range(len(tokenLocInSub)):
                if tokenLocInSub[j][0] == i:
                    differCount.append(tokenDifferCount[j])

            tokenColor = []
            maxProb = np.max(differCount)
            minProb = np.min(differCount)

            for k in range(len(differCount)):
                if differCount[k] > 0:
                    decNum = int(differCount[k] * 127 / maxProb) + 127
                    color = '#8888' + hex(decNum)[2:].zfill(2)
                    tokenColor.append(color)
                elif differCount[k] < 0:
                    decNum = int(differCount[k] * 127 / minProb) + 127
                    color = '#' + hex(decNum)[2:].zfill(2) + '8888'
                    tokenColor.append(color)
                else:
                    tokenColor.append('#888888')

            tokenColorZip.append(tokenColor)
            differCountZip.append(differCount)

        return tokenColorZip, differCountZip

    def DealDataZip(self, DataZip):
        oriSenListZip = []
        colorCountZip = []
        differCountZip = []

        for data in DataZip:
            # split each input into clauses for the front end
            oriSenList = InputToSenList(data, 'clause')
            oriSenListZip.append(oriSenList)
            # and hand it to the model for prediction

            # NOTE: GetImportanceByColor now returns three zipped dicts;
            # this helper predates that change and is kept for reference
            colorCount, differCount = self.GetImportanceByColor()
            colorCountZip.append(colorCount)
            differCount = differCount.tolist()
            differCountZip.append(differCount)

        return oriSenListZip, colorCountZip, differCountZip

    def GetDeatail(self):
        comment = []

        for i in range(self.sentenSize):
            subSenList = copy.deepcopy(self.senList)
            theData = []

            # keep clause i and blank out all the others
            for j in range(self.sentenSize):
                if j == i:
                    theData.append(subSenList[i])
                else:
                    theData.append('0 0 0')

            fullSent = ' '.join(theData)
            comment.append(fullSent.split())

        res = self.PreRNN.GetRes(comment)
        return res

if __name__ == "__main__":
    myinput = "44210 50393 43088 31169 23567 27393 34057 36073 47778 41062 34351 34428 39507 39018 45137 49284 42159 38415 51972 "
    # myinput = input("input: ")

    # NOTE: the constructor takes (myinput, RNNModel); the original call
    # passed ('clause', 'judge'). RNNModel stands for any object exposing
    # Predict/GetRes, e.g. the PredictRNN class of the prediction module.
    MyReorder = ReorderByGroupCovid(myinput, RNNModel)
    oriSenList = MyReorder.senList
    # the input is first turned into a clause list: one copy goes to the
    # front end, the other is handed to the model for prediction
    res = MyReorder.oriRes
    globalDataZip, localDataZip, orderLineZip = MyReorder.GetImportanceByColor()

Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@ -0,0 +1,189 @@
from collections import Counter
import pandas as pd
import numpy as np
import tensorflow as tf
import keras
from sklearn.preprocessing import MinMaxScaler
import re
import copy
from random import shuffle
import math


def InputToSenList(senten, model):
    mark = ' mark! '
    # the regular expression decides where to split
    stripSpecialChars = re.compile("[^A-Za-z0-9 ]+")
    # lower-case everything
    senten = senten.lower().replace('<br />', '')
    # replace all punctuation with the mark token
    subSenList = []

    if model == 'clause':
        myinput = re.sub(stripSpecialChars, mark, senten)
        # wordVec holds the tokens, i.e. the words
        wordVec = myinput.split()

        # markLoc records the mark (punctuation) positions used as clause boundaries
        markLoc = []
        markLoc.append(0)

        shiftNum = 0
        for i in range(len(wordVec)):
            if wordVec[i - shiftNum] == 'mark!':
                markLoc.append(i - shiftNum)
                wordVec.pop(i - shiftNum)
                shiftNum += 1

        # split on the punctuation positions and collect each clause
        for i in range(len(markLoc) - 1):
            subSenList.append(" ".join(wordVec[markLoc[i]:markLoc[i + 1]]))
    else:
        myinput = re.sub(stripSpecialChars, ' ', senten)
        # here the list simply holds the tokens
        subSenList = myinput.split()

    return subSenList

from random import randint


def ChanInpSubByOne(senList, subNumCount):
    sentenSize = len(senList)
    subSenList = []

    smallSenNum = subNumCount
    bigSenNum = randint(0, sentenSize - 1)

    if smallSenNum > bigSenNum:
        temp = smallSenNum
        smallSenNum = bigSenNum
        bigSenNum = temp

    # move the clause at smallSenNum to position bigSenNum and shift the
    # clauses in between one step forward
    for j in range(sentenSize):
        if j == bigSenNum:
            subSenList.append(senList[smallSenNum])
        elif j >= smallSenNum and j < bigSenNum:
            subSenList.append(senList[j + 1])
        else:
            subSenList.append(senList[j])

    fullSent = ' '.join(subSenList)
    return fullSent

def Rrtreat(trainData,PreRNN):
|
||||
print('preRNNOk')
|
||||
# PreRNN=aRNNModel()
|
||||
#定义好的RNN
|
||||
|
||||
#原序列的预测值
|
||||
|
||||
|
||||
|
||||
|
||||
# trainData=PreRNN.trainData
|
||||
|
||||
|
||||
|
||||
|
||||
TrainDataDiffer=np.zeros([len(trainData),10])
|
||||
# startNum=10900
|
||||
# TrainDataDetail=np.load('./TrainDateForXplain_'+str(startNum)+'.npy')
|
||||
|
||||
for i in range(len(trainData)):
|
||||
|
||||
|
||||
theDifferCount=[]
|
||||
|
||||
comment=trainData[i]
|
||||
# comment=comment.tolist()
|
||||
# print('comment',comment)
|
||||
# comment=' '.join(comment)
|
||||
|
||||
|
||||
# comment=' '.join(commentList)
|
||||
|
||||
senList=InputToSenList(comment,'clause')
|
||||
|
||||
sentenSize=len(senList)
|
||||
|
||||
# if(sentenSize<=2):
|
||||
# continue
|
||||
#记录每次重新排序以后和原来结果的差值
|
||||
differCount=np.zeros(sentenSize,dtype=float)
|
||||
|
||||
|
||||
|
||||
iterations=15
|
||||
|
||||
# print('senlist',senList)
|
||||
oriRes=PreRNN.Predict(' '.join(senList))
|
||||
|
||||
|
||||
counter=0
|
||||
for l in range(sentenSize):
|
||||
|
||||
counter+=1
|
||||
for k in range(iterations):
|
||||
thecomment=ChanInpSubByOne(senList,l)
|
||||
|
||||
|
||||
res=PreRNN.Predict(' '.join(thecomment))
|
||||
|
||||
calGap=0
|
||||
for m in range(len(res)):
|
||||
calGap+=abs(res[m]-oriRes[m])
|
||||
|
||||
calGap/=len(res)
|
||||
|
||||
differCount[l]+=calGap
|
||||
|
||||
|
||||
if(counter==10):
|
||||
print('!!error')
|
||||
break
|
||||
|
||||
|
||||
|
||||
theDifferCount=differCount/iterations
|
||||
counter=0
|
||||
for num in theDifferCount:
|
||||
TrainDataDiffer[i][counter]=num
|
||||
counter+=1
|
||||
if(counter>=10):
|
||||
break
|
||||
|
||||
|
||||
if i%1000 == 0:
|
||||
print('i',i)
|
||||
thePath='./TrainDataDifferRandomByOne_'+str(i)+'.npy'
|
||||
np.save(thePath,TrainDataDiffer)
|
||||
|
||||
|
||||
np.save('./TrainDataDiffer',TrainDataDiffer)
|
||||
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
Rrtreat()
|
|
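
# Added usage sketch: Rrtreat expects a list of raw comment strings plus a model
# object exposing Predict(text) -> sequence of scores. DummyRNN below is a
# hypothetical stand-in, not the project's real model.
#
#   class DummyRNN:
#       def Predict(self, text):
#           return [len(text) % 7 / 7.0]  # fake score derived from the length
#
#   Rrtreat(["first clause, second clause, third clause."], DummyRNN())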
@ -0,0 +1,6 @@


# from Flask import ToRun
# # import the test1 function of the test1 module from the current file

# if __name__=='__main__':
#     print 'as the main program'
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.

@ -0,0 +1,49 @@
#!/bin/bash
#####################################
## File name : check_code.sh
## Create date : 2018-11-25 15:57
## Modified date : 2019-02-18 14:23
## Author : DARREN
## Describe : not set
## Email : lzygzh@126.com
####################################

realpath=$(readlink -f "$0")
export basedir=$(dirname "$realpath")
export filename=$(basename "$realpath")
export PATH=$PATH:$basedir/dlbase
export PATH=$PATH:$basedir/dlproc
#base sh file
. dlbase.sh
#function sh file
. etc.sh

source $env_path/py2env/bin/activate
pylint --rcfile=pylint.conf base_graph.py
pylint --rcfile=pylint.conf etc.py
pylint --rcfile=pylint.conf graph.py
pylint --rcfile=pylint.conf main.py
pylint --rcfile=pylint.conf record.py
pylint --rcfile=pylint.conf rnn_model.py
pylint --rcfile=pylint.conf show.py
pylint --rcfile=pylint.conf status.py
pylint --rcfile=pylint.conf test_graph.py
pylint --rcfile=pylint.conf train_graph.py
pylint --rcfile=pylint.conf name_dataset.py
# freeze the py2env packages (the original wrote the two freeze outputs to
# swapped, misspelled "requiements" files)
pip freeze > python2_requirements.txt
deactivate

source $env_path/py3env/bin/activate
pylint --rcfile=pylint.conf base_graph.py
pylint --rcfile=pylint.conf etc.py
pylint --rcfile=pylint.conf graph.py
pylint --rcfile=pylint.conf main.py
pylint --rcfile=pylint.conf record.py
pylint --rcfile=pylint.conf rnn_model.py
pylint --rcfile=pylint.conf show.py
pylint --rcfile=pylint.conf status.py
pylint --rcfile=pylint.conf test_graph.py
pylint --rcfile=pylint.conf train_graph.py
pylint --rcfile=pylint.conf name_dataset.py
pip freeze > python3_requirements.txt
deactivate
@ -0,0 +1,53 @@

#!/bin/bash
#####################################
## File name : create_env.sh
## Create date : 2018-11-25 15:54
## Modified date : 2019-02-02 17:45
## Author : DARREN
## Describe : not set
## Email : lzygzh@126.com
####################################

realpath=$(readlink -f "$0")
export basedir=$(dirname "$realpath")
export filename=$(basename "$realpath")
export PATH=$PATH:$basedir/dlbase
export PATH=$PATH:$basedir/dlproc
#base sh file
. dlbase.sh
#function sh file
. etc.sh
#assuming virtualenv is installed

rm -rf $env_path
mkdir $env_path
cd $env_path
virtualenv -p /usr/bin/python2 py2env
source $env_path/py2env/bin/activate
pip install Pillow
#pip install tornado
#pip install mysqlclient
pip install -i https://pypi.tuna.tsinghua.edu.cn/simple --upgrade numpy
pip install -i https://pypi.tuna.tsinghua.edu.cn/simple --upgrade matplotlib==2.2.2
pip install -i https://pypi.tuna.tsinghua.edu.cn/simple --upgrade torch
pip install -i https://pypi.tuna.tsinghua.edu.cn/simple --upgrade torchvision
#pip install -i https://pypi.tuna.tsinghua.edu.cn/simple --upgrade scikit-image
#pip install -i https://pypi.tuna.tsinghua.edu.cn/simple --upgrade pandas
#pip install -i https://pypi.tuna.tsinghua.edu.cn/simple --upgrade ipython

deactivate
virtualenv -p /usr/bin/python3 py3env
source $env_path/py3env/bin/activate
pip install Pillow
#pip install tornado
#pip install mysqlclient
#MySQLdb is not supported on Python 3.5 yet
#pip install PyMySQL
pip install -i https://pypi.tuna.tsinghua.edu.cn/simple --upgrade numpy
pip install -i https://pypi.tuna.tsinghua.edu.cn/simple --upgrade matplotlib
pip install -i https://pypi.tuna.tsinghua.edu.cn/simple --upgrade torch
pip install -i https://pypi.tuna.tsinghua.edu.cn/simple --upgrade torchvision
#pip install -i https://pypi.tuna.tsinghua.edu.cn/simple --upgrade scikit-image
#pip install -i https://pypi.tuna.tsinghua.edu.cn/simple --upgrade pandas
#pip install -i https://pypi.tuna.tsinghua.edu.cn/simple --upgrade ipython
deactivate

@ -0,0 +1,12 @@

#!/bin/bash
if [ "$dlbase" ]; then
    return
fi

export dlbase="dlbase.sh"

. dllog.sh
. dlfile.sh
. dlgit.sh
. dlroot.sh
. dlboot.sh

@ -0,0 +1,57 @@

#!/bin/bash
if [ "$dlboot" ]; then
    return
fi

export dlboot="dlboot.sh"

. dllog.sh
. dlfile.sh

function dlboot_create_auto_start_file(){
    local user_name="$1"
    local file_name="$2"
    local file_path="/home/$user_name"
    local file_full_name="$file_path/$file_name"
    $DLLOG_INFO $file_full_name
    dlfile_check_is_have_file $file_full_name

    if [[ $? -eq 0 ]]; then
        echo \#!/bin/sh > $file_full_name
        chmod +x $file_full_name
        dlboot_set_shell_auto_run "$user_name" "$file_full_name"
    else
        echo \#!/bin/sh > $file_full_name
        chmod +x $file_full_name
        $DLLOG_INFO "$file_full_name auto-run file already exists"
    fi
}

function dlboot_set_shell_auto_run(){
    local user_name="$1"
    local file_full_name="$2"
    echo su $user_name -c \"$file_full_name\" >> /etc/rc.d/rc.local
    chmod +x /etc/rc.d/rc.local
}

function dlboot_set_progress_auto_start(){
    local user_name="$1"
    local file_name="$2"
    local content="$3"
    local file_path="/home/$user_name"
    local file_full_name="$file_path/$file_name"
    dlfile_check_is_have_file $file_full_name
    if [[ $? -eq 0 ]]; then
        dlboot_create_auto_start_file "$user_name" "$file_name"
        echo -e $content >> $file_full_name
    else
        echo -e $content >> $file_full_name
    fi
}

if [ -n "$BASH_SOURCE" -a "$BASH_SOURCE" != "$0" ]
then
    echo
else
    echo self run
fi

@ -0,0 +1,82 @@

#!/bin/bash
if [ "$dlfile" ]; then
    return
fi

export dlfile="dlfile"

. dllog.sh

function dlfile_create_dir(){
    mkdir $1
    if [[ $? -eq 0 ]]; then
        echo create $1 folder success;
        return 0
    else
        echo create $1 folder fail;
        return 1
    fi
}

function dlfile_touch_file(){
    if [ ! -f "$1" ]; then
        touch "$1"
    fi
}

function dlfile_check_is_have_dir(){
    if [ ! -d "$1" ]; then
        # echo $1 folder does not exist
        return 0
    else
        # echo $1 folder exists
        return 1
    fi
}

function dlfile_check_is_have_file(){
    if [ ! -f "$1" ]; then
        $DLLOG_DEBUG "$1 file does not exist"
        return 0
    else
        $DLLOG_DEBUG "$1 file exists"
        return 1
    fi
}

function dlfile_try_create_dir(){
    dlfile_check_is_have_dir $1
    if [[ $? -eq 0 ]]; then
        dlfile_create_dir $1
    fi
}

function dlfile_scp_file(){
    local copy_comand=$1
    local file_host_password=$2
    local file_copy_full_name=$3

    dlfile_check_is_have_file "$file_copy_full_name"

    if [[ $? -eq 0 ]]; then
        dlfile_scp_file_without_check "$copy_comand" "$file_host_password"
    else
        $DLLOG_INFO "$3 already has the file"
    fi
}

function dlfile_scp_file_without_check(){
    local copy_comand=$1
    local file_host_password=$2
    # expect must be installed:
    # yum -y install expect

    expect -c "
    spawn scp -r $copy_comand
    expect {
        \"*assword\" {set timeout 300; send \"$file_host_password\r\";}
        \"yes/no\" {send \"yes\r\"; exp_continue;}
    }
    expect eof"
}

@ -0,0 +1,35 @@

#!/bin/bash
if [ "$dlgit" ]; then
    return
fi

export dlgit="dlgit.sh"

. dllog.sh

function dlgit_down(){
    local down_url="$1"
    local folder="$2"

    dlfile_check_is_have_dir $folder

    if [[ $? -eq 0 ]]; then
        dlfile_try_create_dir "$folder"
        git clone $down_url "$folder"
    else
        $DLLOG_INFO "$1 has already been cloned"
    fi
}

function dlgit_clone_git(){
    local down_url="$1"
    local folder="$2"
    dlgit_down $down_url $folder
}

if [ -n "$BASH_SOURCE" -a "$BASH_SOURCE" != "$0" ]
then
    echo
else
    echo fun self
fi

@ -0,0 +1,55 @@

#!/bin/bash
if [ "$dllog" ]; then
    return
fi

export dllog="dllog.sh"

. dlfile.sh

log_path="./shlog"

DLLOG_DEBUG="dllog_log debug $LINENO $0"
DLLOG_INFO="dllog_log info $LINENO $0"
DLLOG_WARNING="dllog_log warning $LINENO $0"
DLLOG_ERROR="dllog_log error $LINENO $0"

function dllog_log(){
    local now_date=`date "+%Y-%m-%d %H:%M:%S"`
    local user_name=`whoami`
    local log_level=$1
    local line_number=$2
    local file_name=$3
    local content=$4
    local now_day=`date "+%Y-%m-%d"`

    local log_content="LEVEL:$log_level\tUSER:$user_name\tFILE:$file_name\tLINE:$line_number\tFUNC:${FUNCNAME[1]}\tDATE:$now_date CONTENT:$content"
    local log_debug_content="LINE:$line_number\tFUNC:${FUNCNAME[1]}\tFILE:$file_name\tCONTENT:$content"
    local log_write_content="LEVEL:$log_level USER:$user_name FILE:$file_name LINE:$line_number FUNC:${FUNCNAME[1]} DATE:$now_date CONTENT:$content"

    if [ "$log_level" == "error" ] ; then
        echo -e "\033[31m$log_content\033[0m"
    elif [ "$log_level" == "info" ] ; then
        echo -e "\033[32m$log_debug_content\033[0m"
    elif [ "$log_level" == "warning" ] ; then
        echo -e "\033[33m$log_content\033[0m"
    else
        echo -e "\033[32m$log_debug_content\033[0m"
    fi
    dlfile_try_create_dir "$log_path"
    echo $log_write_content >> "$log_path/$now_day.log"
}

function log_test(){
    $DLLOG_DEBUG "debug test"
    $DLLOG_INFO "test"
    $DLLOG_WARNING "test"
    $DLLOG_ERROR "error test"
}

if [ -n "$BASH_SOURCE" -a "$BASH_SOURCE" != "$0" ]
then
    echo
else
    log_test
fi

@ -0,0 +1,56 @@

#!/bin/bash
if [ "$dlroot" ]; then
    return
fi

export dlroot="dlroot.sh"

. dllog.sh

function dlroot_check_root_user(){
    local user_name=`whoami`
    if [ "$user_name" == "root" ] ; then
        return 0
    else
        $DLLOG_ERROR "must be run as root"
        return 1
    fi
}

function dlroot_change_file_owner(){
    local user_name=$1
    local file_path=$2
    dlroot_check_root_user

    if [[ $? -eq 0 ]]; then
        chown -R $user_name:$user_name "$file_path"
    else
        return
    fi
}

function dlroot_create_and_write_file(){
    local content=$1
    local file_path=$2
    dlroot_check_root_user

    if [[ $? -eq 0 ]]; then
        echo -e $content > $file_path
    else
        return
    fi
}

function dlroot_write_to_file_back(){
    local content=$1
    local file_path=$2
    echo -e $content >> $file_path
}

if [ -n "$BASH_SOURCE" -a "$BASH_SOURCE" != "$0" ]
then
    echo
else
    dlroot_check_root_user
    echo run self
fi

@ -0,0 +1,96 @@

#!/bin/bash
if [ "$dluser" ]; then
    echo no need to import dluser.sh again
    return
fi

export dluser="dluser.sh"

. dlroot.sh

function dluser_find_user(){
    user_name=$1
    [ -z $1 ] && return 0
    if id $1 &> /dev/null ;then
        return 1
    else
        return 0
    fi
}

function dluser_del_user(){
    user_name=$1
    dluser_find_user $user_name

    Res=$?
    if [ $Res -eq 1 ]
    then
        dlroot_check_root_user
        if [[ $? -eq 0 ]]; then
            $DLLOG_INFO "delete the user:$user_name"
            userdel -r $user_name
        else
            return
        fi
    else
        $DLLOG_INFO "the user does not exist"
    fi
}

function dluser_create_user(){
    user_name=$1
    dluser_find_user $user_name

    Res=$?
    if [ $Res -eq 1 ]
    then
        $DLLOG_INFO "the user already exists"
    else
        dlroot_check_root_user
        if [[ $? -eq 0 ]]; then
            $DLLOG_INFO "the user:$user_name does not exist, creating it"
            adduser $1
        else
            return
        fi
    fi
}

function dluser_set_user_password(){
    user_name=$1
    user_password=$2

    dluser_find_user $user_name

    Res=$?
    if [ $Res -eq 1 ]
    then
        # check whether the password is locked
        is_user_locked=`passwd -S $user_name|awk '{print $2}'`
        if [ "$is_user_locked" == "LK" ]
        then
            $DLLOG_INFO "You need to input the user password"
            passwd $user_name;
        elif [ "$is_user_locked" == "PS" ]
        then
            $DLLOG_INFO "the user password has been set before, you can check it"
        else
            $DLLOG_WARNING "unknown lock status: $is_user_locked"
        fi
    else
        $DLLOG_WARNING "the user does not exist:$user_name"
    fi
}

function dluser_create_user_and_set_password(){
    user_name=$1
    dluser_create_user $user_name
    dluser_set_user_password $user_name
}

if [ -n "$BASH_SOURCE" -a "$BASH_SOURCE" != "$0" ]
then
    echo
else
    echo "run self"
fi

@ -0,0 +1,28 @@

#!/bin/bash
#####################################
## File name : dlbase_test.sh
## Create date : 2018-11-25 17:37
## Modified date : 2018-11-25 17:38
## Author : DARREN
## Describe : not set
## Email : lzygzh@126.com
####################################

realpath=$(readlink -f "$0")
basedir=$(dirname "$realpath")
export basedir=$basedir/../
export filename=$(basename "$realpath")
export PATH=$PATH:$basedir/dlbase
export PATH=$PATH:$basedir/dlproc
#base sh file
. dlbase.sh
#function sh file
. dlgit_test.sh
. dllog_test.sh


function dlbase_test(){
    dlgit_test
    dllog_test
}

@ -0,0 +1,39 @@

#!/bin/bash
#####################################
## File name : dlgit_test.sh
## Create date : 2018-11-25 16:56
## Modified date : 2019-01-27 22:21
## Author : DARREN
## Describe : not set
## Email : lzygzh@126.com
####################################

realpath=$(readlink -f "$0")
basedir=$(dirname "$realpath")
export basedir=$basedir/../
export filename=$(basename "$realpath")
export PATH=$PATH:$basedir/dlbase
export PATH=$PATH:$basedir/dlproc
#base sh file
. dlbase.sh
#function sh file

function dlgit_down_test(){
    down_url="https://github.com/matplotlib/matplotlib.git"
    folder="/tmp/matplotlib"
    dlgit_clone_git $down_url $folder
    rm -rf /home/liuzy/matplotlib
}

function dlgit_down_test2(){
    down_url="https://github.com/darr/pybase.git"
    folder="./pybase"
    dlgit_clone_git $down_url $folder
    rm -rf ./pybase
}

function dlgit_test(){
    dlgit_down_test2
}

dlgit_test

@ -0,0 +1,32 @@

#!/bin/bash
#####################################
## File name : dllog_test.sh
## Create date : 2018-11-25 17:32
## Modified date : 2018-11-25 17:37
## Author : DARREN
## Describe : not set
## Email : lzygzh@126.com
####################################

realpath=$(readlink -f "$0")
basedir=$(dirname "$realpath")
export basedir=$basedir/../
export filename=$(basename "$realpath")
export PATH=$PATH:$basedir/dlbase
export PATH=$PATH:$basedir/dlproc
#base sh file
. dlbase.sh
#function sh file

function dllog_log_test(){
    $DLLOG_DEBUG "debug message"
    $DLLOG_INFO "info message"
    $DLLOG_WARNING "warning message"
    $DLLOG_ERROR "error message"
}

function dllog_test(){
    dllog_log_test
}

dllog_test

@ -0,0 +1,20 @@

#!/bin/bash
#####################################
## File name : etc.sh
## Create date : 2018-11-25 15:53
## Modified date : 2019-02-19 13:50
## Author : DARREN
## Describe : not set
## Email : lzygzh@126.com
####################################

realpath=$(readlink -f "$0")
export basedir=$(dirname "$realpath")
export filename=$(basename "$realpath")
export PATH=$PATH:$basedir/dlbase
export PATH=$PATH:$basedir/dlproc
#base sh file
. dlbase.sh
#function sh file

env_path=~/.classify_names_with_rnn_env
@ -0,0 +1,46 @@

#!/bin/bash
#####################################
## File name : install_pybase.sh
## Create date : 2018-11-25 16:03
## Modified date : 2019-01-27 22:34
## Author : DARREN
## Describe : not set
## Email : lzygzh@126.com
####################################

realpath=$(readlink -f "$0")
export basedir=$(dirname "$realpath")
export filename=$(basename "$realpath")
export PATH=$PATH:$basedir/dlbase
export PATH=$PATH:$basedir/dlproc
#base sh file
. dlbase.sh
#function sh file
. etc.sh

function dlgit_down_pybase(){
    down_url="https://github.com/darr/pybase.git"
    folder="./pybase"
    dlgit_clone_git $down_url $folder
}

function dlgit_rm_pybase(){
    rm -rf ./pybase
}

dlgit_down_pybase

source $env_path/py2env/bin/activate
pybase_path=./pybase
cd $pybase_path
pwd
bash ./set_up.sh
cd ..
deactivate

source $env_path/py3env/bin/activate
pybase_path=./pybase
cd $pybase_path
pwd
bash ./set_up.sh
deactivate

@ -0,0 +1,564 @@

[MASTER]

# A comma-separated list of package or module names from where C extensions may
# be loaded. Extensions are loading into the active Python interpreter and may
# run arbitrary code
extension-pkg-whitelist=

# Add files or directories to the blacklist. They should be base names, not
# paths.
ignore=CVS

# Add files or directories matching the regex patterns to the blacklist. The
# regex matches against base names, not paths.
ignore-patterns=

# Python code to execute, usually for sys.path manipulation such as
# pygtk.require().
#init-hook=

# Use multiple processes to speed up Pylint.
jobs=1

# List of plugins (as comma separated values of python modules names) to load,
# usually to register additional checkers.
load-plugins=

# Pickle collected data for later comparisons.
persistent=no

# Specify a configuration file.
#rcfile=

# When enabled, pylint would attempt to guess common misconfiguration and emit
# user-friendly hints instead of false-positive error messages
suggestion-mode=yes

# Allow loading of arbitrary C extensions. Extensions are imported into the
# active Python interpreter and may run arbitrary code.
unsafe-load-any-extension=no


[MESSAGES CONTROL]

# Only show warnings with the listed confidence levels. Leave empty to show
# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
confidence=

# Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifiers separated by comma (,) or put this
# option multiple times (only on the command line, not in the configuration
# file where it should appear only once).You can also use "--disable=all" to
# disable everything first and then reenable specific checks. For example, if
# you want to run only the similarities checker, you can use "--disable=all
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use"--disable=all --enable=classes
# --disable=W"
disable=print-statement,
        parameter-unpacking,
        unpacking-in-except,
        old-raise-syntax,
        backtick,
        long-suffix,
        old-ne-operator,
        old-octal-literal,
        import-star-module-level,
        non-ascii-bytes-literal,
        invalid-unicode-literal,
        raw-checker-failed,
        bad-inline-option,
        locally-disabled,
        locally-enabled,
        file-ignored,
        suppressed-message,
        useless-suppression,
        deprecated-pragma,
        apply-builtin,
        basestring-builtin,
        buffer-builtin,
        cmp-builtin,
        coerce-builtin,
        execfile-builtin,
        file-builtin,
        long-builtin,
        raw_input-builtin,
        reduce-builtin,
        standarderror-builtin,
        unicode-builtin,
        xrange-builtin,
        coerce-method,
        delslice-method,
        getslice-method,
        setslice-method,
        no-absolute-import,
        old-division,
        dict-iter-method,
        dict-view-method,
        next-method-called,
        metaclass-assignment,
        indexing-exception,
        raising-string,
        reload-builtin,
        oct-method,
        hex-method,
        nonzero-method,
        cmp-method,
        input-builtin,
        round-builtin,
        intern-builtin,
        unichr-builtin,
        map-builtin-not-iterating,
        zip-builtin-not-iterating,
        range-builtin-not-iterating,
        filter-builtin-not-iterating,
        using-cmp-argument,
        eq-without-hash,
        div-method,
        idiv-method,
        rdiv-method,
        exception-message-attribute,
        invalid-str-codec,
        sys-max-int,
        bad-python3-import,
        deprecated-string-function,
        deprecated-str-translate-call,
        deprecated-itertools-function,
        deprecated-types-field,
        next-method-defined,
        dict-items-not-iterating,
        dict-keys-not-iterating,
        dict-values-not-iterating,
        deprecated-operator-function,
        deprecated-urllib-function,
        xreadlines-attribute,
        deprecated-sys-function,
        C0111, #self add disable docstring check
        line-too-long, #self add
        relative-import, #self add
        global-statement,
        import-error,
        exception-escape,
        comprehension-escape

# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option
# multiple time (only on the command line, not in the configuration file where
# it should appear only once). See also the "--disable" option for examples.
enable=c-extension-no-member


[REPORTS]

# Python expression which should return a note less than 10 (10 is the highest
# note). You have access to the variables errors warning, statement which
# respectively contain the number of errors / warnings messages and the total
# number of statements analyzed. This is used by the global evaluation report
# (RP0004).
evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)

# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details
#msg-template=

# Set the output format. Available formats are text, parseable, colorized, json
# and msvs (visual studio).You can also give a reporter class, eg
# mypackage.mymodule.MyReporterClass.
output-format=text

# Tells whether to display a full report or only the messages
reports=no

# Activate the evaluation score.
score=yes


[REFACTORING]

# Maximum number of nested blocks for function / method body
max-nested-blocks=5

# Complete name of functions that never returns. When checking for
# inconsistent-return-statements if a never returning function is called then
# it will be considered as an explicit return statement and no message will be
# printed.
never-returning-functions=optparse.Values,sys.exit


[BASIC]

# Naming style matching correct argument names
argument-naming-style=snake_case

# Regular expression matching correct argument names. Overrides argument-
# naming-style
#argument-rgx=

# Naming style matching correct attribute names
attr-naming-style=snake_case

# Regular expression matching correct attribute names. Overrides attr-naming-
# style
#attr-rgx=

# Bad variable names which should always be refused, separated by a comma
bad-names=foo,
          bar,
          baz,
          toto,
          tutu,
          tata

# Naming style matching correct class attribute names
class-attribute-naming-style=any

# Regular expression matching correct class attribute names. Overrides class-
# attribute-naming-style
#class-attribute-rgx=

# Naming style matching correct class names
class-naming-style=PascalCase

# Regular expression matching correct class names. Overrides class-naming-style
#class-rgx=

# Naming style matching correct constant names
const-naming-style=UPPER_CASE

# Regular expression matching correct constant names. Overrides const-naming-
# style
#const-rgx=

# Minimum line length for functions/classes that require docstrings, shorter
# ones are exempt.
docstring-min-length=-1

# Naming style matching correct function names
function-naming-style=snake_case

# Regular expression matching correct function names. Overrides function-
# naming-style
#function-rgx=

# Good variable names which should always be accepted, separated by a comma
good-names=i,
           ip,
           j,
           s,
           a,
           b,
           c,
           p,
           X,
           Y,
           f,
           l,
           k,
           ex,
           Run,
           _

# Include a hint for the correct naming format with invalid-name
include-naming-hint=no

# Naming style matching correct inline iteration names
inlinevar-naming-style=any

# Regular expression matching correct inline iteration names. Overrides
# inlinevar-naming-style
#inlinevar-rgx=

# Naming style matching correct method names
method-naming-style=snake_case

# Regular expression matching correct method names. Overrides method-naming-
# style
#method-rgx=

# Naming style matching correct module names
module-naming-style=snake_case

# Regular expression matching correct module names. Overrides module-naming-
# style
#module-rgx=

# Colon-delimited sets of names that determine each other's naming style when
# the name regexes allow several styles.
name-group=

# Regular expression which should only match function or class names that do
# not require a docstring.
no-docstring-rgx=^_

# List of decorators that produce properties, such as abc.abstractproperty. Add
# to this list to register other decorators that produce valid properties.
property-classes=abc.abstractproperty

# Naming style matching correct variable names
variable-naming-style=snake_case

# Regular expression matching correct variable names. Overrides variable-
# naming-style
#variable-rgx=


[SPELLING]

# Limits count of emitted suggestions for spelling mistakes
max-spelling-suggestions=4

# Spelling dictionary name. Available dictionaries: none. To make it working
# install python-enchant package.
spelling-dict=

# List of comma separated words that should not be checked.
spelling-ignore-words=

# A path to a file that contains private dictionary; one word per line.
spelling-private-dict-file=

# Tells whether to store unknown words to indicated private dictionary in
# --spelling-private-dict-file option instead of raising a message.
spelling-store-unknown-words=no


[FORMAT]

# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
expected-line-ending-format=

# Regexp for a line that is allowed to be longer than the limit.
ignore-long-lines=^\s*(# )?<?https?://\S+>?$

# Number of spaces of indent required inside a hanging or continued line.
indent-after-paren=4

# String used as indentation unit. This is usually "    " (4 spaces) or "\t" (1
# tab).
indent-string='    '

# Maximum number of characters on a single line.
max-line-length=100

# Maximum number of lines in a module
max-module-lines=1000

# List of optional constructs for which whitespace checking is disabled. `dict-
# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
# `trailing-comma` allows a space between comma and closing bracket: (a, ).
# `empty-line` allows space-only lines.
no-space-check=trailing-comma,
               dict-separator

# Allow the body of a class to be on the same line as the declaration if body
# contains single statement.
single-line-class-stmt=no

# Allow the body of an if to be on the same line as the test if there is no
# else.
single-line-if-stmt=no


[LOGGING]

# Logging modules to check that the string format arguments are in logging
# function parameter format
logging-modules=logging


[VARIABLES]

# List of additional names supposed to be defined in builtins. Remember that
# you should avoid to define new builtins when possible.
additional-builtins=

# Tells whether unused global variables should be treated as a violation.
allow-global-unused-variables=yes

# List of strings which can identify a callback function by name. A callback
# name must start or end with one of those strings.
callbacks=cb_,
          _cb

# A regular expression matching the name of dummy variables (i.e. expectedly
# not used).
dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_

# Argument names that match this expression will be ignored. Default to name
# with leading underscore
ignored-argument-names=_.*|^ignored_|^unused_

# Tells whether we should check for unused import in __init__ files.
init-import=no

# List of qualified module names which can have objects that can redefine
# builtins.
redefining-builtins-modules=six.moves,past.builtins,future.builtins,io,builtins


[MISCELLANEOUS]

# List of note tags to take in consideration, separated by a comma.
notes=FIXME,
      XXX,
      TODO


[SIMILARITIES]

# Ignore comments when computing similarities.
ignore-comments=yes

# Ignore docstrings when computing similarities.
ignore-docstrings=yes

# Ignore imports when computing similarities.
ignore-imports=no

# Minimum lines number of a similarity.
min-similarity-lines=4


[TYPECHECK]

# List of decorators that produce context managers, such as
# contextlib.contextmanager. Add to this list to register other decorators that
# produce valid context managers.
contextmanager-decorators=contextlib.contextmanager

# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E1101 when accessed. Python regular
# expressions are accepted.
generated-members=

# Tells whether missing members accessed in mixin class should be ignored. A
# mixin class is detected if its name ends with "mixin" (case insensitive).
ignore-mixin-members=yes

# This flag controls whether pylint should warn about no-member and similar
# checks whenever an opaque object is returned when inferring. The inference
# can return multiple potential results while evaluating a Python object, but
# some branches might not be evaluated, which results in partial inference. In
# that case, it might be useful to still emit no-member and other checks for
# the rest of the inferred objects.
ignore-on-opaque-inference=yes

# List of class names for which member attributes should not be checked (useful
# for classes with dynamically set attributes). This supports the use of
# qualified names.
ignored-classes=optparse.Values,thread._local,_thread._local

# List of module names for which member attributes should not be checked
# (useful for modules/projects where namespaces are manipulated during runtime
# and thus existing member attributes cannot be deduced by static analysis. It
# supports qualified module names, as well as Unix pattern matching.
ignored-modules=

# Show a hint with possible names when a member name was not found. The aspect
# of finding the hint is based on edit distance.
missing-member-hint=yes

# The minimum edit distance a name should have in order to be considered a
# similar match for a missing member name.
missing-member-hint-distance=1

# The total number of similar names that should be taken in consideration when
# showing a hint for a missing member.
missing-member-max-choices=1


[DESIGN]

# Maximum number of arguments for function / method
max-args=5

# Maximum number of attributes for a class (see R0902).
max-attributes=7

# Maximum number of boolean expressions in a if statement
max-bool-expr=5

# Maximum number of branch for function / method body
max-branches=12

# Maximum number of locals for function / method body
max-locals=15

# Maximum number of parents for a class (see R0901).
max-parents=7

# Maximum number of public methods for a class (see R0904).
max-public-methods=20

# Maximum number of return / yield for function / method body
max-returns=6

# Maximum number of statements in function / method body
max-statements=50

# Minimum number of public methods for a class (see R0903).
min-public-methods=2


[CLASSES]

# List of method names used to declare (i.e. assign) instance attributes.
defining-attr-methods=__init__,
                      __new__,
                      setUp

# List of member names, which should be excluded from the protected access
# warning.
exclude-protected=_asdict,
                  _fields,
                  _replace,
                  _source,
                  _make

# List of valid names for the first argument in a class method.
valid-classmethod-first-arg=cls

# List of valid names for the first argument in a metaclass class method.
valid-metaclass-classmethod-first-arg=mcs


[IMPORTS]

# Allow wildcard imports from modules that define __all__.
allow-wildcard-with-all=no

# Analyse import fallback blocks. This can be used to support both Python 2 and
# 3 compatible code, which means that the block might have code that exists
# only in one or another interpreter, leading to false positives when analysed.
analyse-fallback-blocks=no

# Deprecated modules which should not be used, separated by a comma
deprecated-modules=regsub,
                   TERMIOS,
                   Bastion,
                   rexec

# Create a graph of external dependencies in the given file (report RP0402 must
# not be disabled)
ext-import-graph=

# Create a graph of every (i.e. internal and external) dependencies in the
# given file (report RP0402 must not be disabled)
import-graph=

# Create a graph of internal dependencies in the given file (report RP0402 must
# not be disabled)
int-import-graph=

# Force import order to recognize a module as part of the standard
# compatibility libraries.
known-standard-library=

# Force import order to recognize a module as part of a third party library.
known-third-party=enchant


[EXCEPTIONS]

# Exceptions that will emit a warning when being caught. Defaults to
# "Exception"
overgeneral-exceptions=Exception

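The "evaluation" option above is the formula behind the score that the pylint runs
in check_code.sh print. A small Python sketch of that arithmetic (the message
counts are made-up examples):

    # pylint's global evaluation: 10.0 minus 10x the weighted message rate
    def pylint_score(error, warning, refactor, convention, statement):
        return 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)

    # 1 error, 3 warnings, 2 refactors, 4 conventions over 200 statements:
    print(pylint_score(1, 3, 2, 4, 200))  # -> 9.3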
@ -0,0 +1,21 @@

#!/bin/bash
#####################################
## File name : remove_env.sh
## Create date : 2018-11-25 16:01
## Modified date : 2019-01-27 16:25
## Author : DARREN
## Describe : not set
## Email : lzygzh@126.com
####################################

realpath=$(readlink -f "$0")
export basedir=$(dirname "$realpath")
export filename=$(basename "$realpath")
export PATH=$PATH:$basedir/dlbase
export PATH=$PATH:$basedir/dlproc
#base sh file
. dlbase.sh
#function sh file
. etc.sh

rm -rf $env_path
File diff suppressed because it is too large
File diff suppressed because it is too large

@ -0,0 +1,268 @@
Ang
Au-Yong
Bai
Ban
Bao
Bei
Bian
Bui
Cai
Cao
Cen
Chai
Chaim
Chan
Chang
Chao
Che
Chen
Cheng
Cheung
Chew
Chieu
Chin
Chong
Chou
Chu
Cui
Dai
Deng
Ding
Dong
Dou
Duan
Eng
Fan
Fei
Feng
Foong
Fung
Gan
Gauk
Geng
Gim
Gok
Gong
Guan
Guang
Guo
Gwock
Han
Hang
Hao
Hew
Hiu
Hong
Hor
Hsiao
Hua
Huan
Huang
Hui
Huie
Huo
Jia
Jiang
Jin
Jing
Joe
Kang
Kau
Khoo
Khu
Kong
Koo
Kwan
Kwei
Kwong
Lai
Lam
Lang
Lau
Law
Lew
Lian
Liao
Lim
Lin
Ling
Liu
Loh
Long
Loong
Luo
Mah
Mai
Mak
Mao
Mar
Mei
Meng
Miao
Min
Ming
Moy
Mui
Nie
Niu
Ou-Yang
Ow-Yang
Pan
Pang
Pei
Peng
Ping
Qian
Qin
Qiu
Quan
Que
Ran
Rao
Rong
Ruan
Sam
Seah
See
Seow
Seto
Sha
Shan
Shang
Shao
Shaw
She
Shen
Sheng
Shi
Shu
Shuai
Shui
Shum
Siew
Siu
Song
Sum
Sun
Sze
Tan
Tang
Tao
Teng
Teoh
Thean
Thian
Thien
Tian
Tong
Tow
Tsang
Tse
Tsen
Tso
Tze
Wan
Wang
Wei
Wen
Weng
Won
Wong
Woo
Xiang
Xiao
Xie
Xing
Xue
Xun
Yan
Yang
Yao
Yap
Yau
Yee
Yep
Yim
Yin
Ying
Yong
You
Yuan
Zang
Zeng
Zha
Zhan
Zhang
Zhao
Zhen
Zheng
Zhong
Zhou
Zhu
Zhuo
Zong
Zou
Bing
Chi
Chu
Cong
Cuan
Dan
Fei
Feng
Gai
Gao
Gou
Guan
Gui
Guo
Hong
Hou
Huan
Jian
Jiao
Jin
Jiu
Juan
Jue
Kan
Kuai
Kuang
Kui
Lao
Liang
Lu:
Luo
Man
Nao
Pian
Qiao
Qing
Qiu
Rang
Rui
She
Shi
Shuo
Sui
Tai
Wan
Wei
Xian
Xie
Xin
Xing
Xiong
Xuan
Yan
Yin
Ying
Yuan
Yue
Yun
Zha
Zhai
Zhang
Zhi
Zhuan
Zhui

@ -0,0 +1,519 @@

Abl
Adsit
Ajdrna
Alt
Antonowitsch
Antonowitz
Bacon
Ballalatak
Ballaltick
Bartonova
Bastl
Baroch
Benesch
Betlach
Biganska
Bilek
Blahut
Blazek
Blazek
Blazejovsky
Blecha
Bleskan
Blober
Bock
Bohac
Bohunovsky
Bolcar
Borovka
Borovski
Borowski
Borovsky
Brabbery
Brezovjak
Brousil
Bruckner
Buchta
Cablikova
Camfrlova
Cap
Cerda
Cermak
Chermak
Cermak
Cernochova
Cernohous
Cerny
Cerney
Cerny
Cerv
Cervenka
Chalupka
Charlott
Chemlik
Chicken
Chilar
Chromy
Cihak
Clineburg
Klineberg
Cober
Colling
Cvacek
Czabal
Damell
Demall
Dehmel
Dana
Dejmal
Dempko
Demko
Dinko
Divoky
Dolejsi
Dolezal
Doljs
Dopita
Drassal
Driml
Duyava
Dvorak
Dziadik
Egr
Entler
Faltysek
Faltejsek
Fencl
Fenyo
Fillipova
Finfera
Finferovy
Finke
Fojtikova
Fremut
Friedrich
Frierdich
Fritsch
Furtsch
Gabrisova
Gavalok
Geier
Georgijev
Geryk
Giersig
Glatter
Glockl
Grabski
Grozmanova
Grulich
Grygarova
Hadash
Hafernik
Hajek
Hajicek
Hajkova
Hana
Hanek
Hanek
Hanika
Hanusch
Hanzlick
Handzlik
Hanzlik
Harger
Hartl
Havlatova
Havlice
Hawlata
Heidl
Herback
Herodes
Hiorvst
Hladky
Hlavsa
Hnizdil
Hodowal
Hodoval
Holan
Holub
Homulka
Hora
Hovanec
Hrabak
Hradek
Hrdy
Hrula
Hruska
Hruskova
Hudecek
Husk
Hynna
Jaluvka
Janca
Janicek
Jenicek
Janacek
Janick
Janoch
Janosik
Janutka
Jares
Jarzembowski
Jedlicka
Jelinek
Jindra
Jirava
Jirik
Jirku
Jirovy
Jobst
Jonas
Kacirek
Kafka
Kafka
Kaiser
Kanak
Kaplanek
Kara
Karlovsky
Kasa
Kasimor
Kazimor
Kazmier
Katschker
Kauphsman
Kenzel
Kerner
Kesl
Kessel
Kessler
Khork
Kirchma
Klein
Klemper
Klimes
Kober
Koberna
Koci
Kocian
Kocian
Kofron
Kolacny
Koliha
Kolman
Koma
Komo
Coma
Konarik
Kopp
Kopecky
Korandak
Korycan
Korycansky
Kosko
Kouba
Kouba
Koukal
Koza
Kozumplikova
Kratschmar
Krawiec
Kreisinger
Kremlacek
Kremlicka
Kreutschmer
Krhovsky
Krivan
Krivolavy
Kriz
Kruessel
Krupala
Krytinar
Kubin
Kucera
Kucharova
Kudrna
Kuffel
Kupfel
Kofel
Kulhanek
Kunik
Kurtz
Kusak
Kvasnicka
Lawa
Linart
Lind
Lokay
Loskot
Ludwig
Lynsmeier
Macha
Machacek
Macikova
Malafa
Malec
Malecha
Maly
Marek
Marik
Marik
Markytan
Matejka
Matjeka
Matocha
Maxa/B
Mayer
Meier
Merta
Meszes
Metjeka
Michalovic
Michalovicova
Miksatkova
Mojzis
Mojjis
Mozzis
Molcan
Monfort
MonkoAustria
Morava
Morek
Muchalon
Mudra
Muhlbauer
Nadvornizch
Nadwornik
Navara
Navratil
Navratil
Navrkal
Nekuza
Nemec
Nemecek
Nestrojil
Netsch
Neusser
Neisser
Naizer
Novak
Nowak
Novotny
Novy Novy
Oborny
Ocasek
Ocaskova
Oesterreicher
Okenfuss
Olbrich
Ondrisek
Opizka
Opova
Opp
Osladil
Ozimuk
Pachr
Palzewicz
Panek
Patril
Pavlik
Pavlicka
Pavlu
Pawlak
Pear
Peary
Pech
Peisar
Paisar
Paiser
Perevuznik
Perina
Persein
Petrezelka
Petru
Pesek
Petersen
Pfeifer
Picha
Pillar
Pellar
Piller
Pinter
Pitterman
Planick
Piskach
Plisek
Plisko
Pokorny
Ponec
Ponec
Prachar
Praseta
Prchal
Prehatney
Pretsch
Prill
Psik
Pudel
Purdes
Quasninsky
Raffel
Rafaj1
Ransom
Rezac
Riedel
Riha
Riha
Ritchie
Rozinek
Ruba
Ruda
Rumisek
Ruzicka
Rypka
Rebka
Rzehak
Sabol
Safko
Samz
Sankovsky
Sappe
Sappe
Sarna
Satorie
Savchak
Svotak
Swatchak
Svocak
Svotchak
Schallom
Schenk
Schlantz
Schmeiser
Schneider
Schmied
Schubert
Schwarz
Schwartz
Sedmik
Sedmikova
Seger
Sekovora
Semick
Serak
Sherak
Shima
Shula
Siegl
Silhan
Simecek
Simodines
Simonek
Sip
Sitta
Skala
Skeril
Skokan
Skomicka
Skwor
Slapnickova
Slejtr
Slepicka
Slepica
Slezak
Slivka
Smith
Snelker
Sokolik
Soucek
Soukup
Soukup
Spicka
Spoerl
Sponer
Srda
Srpcikova
Stangl
Stanzel
Stary
Staska
Stedronsky
Stegon
Sztegon
Steinborn
Stepan
Stites
Stluka
Stotzky
StrakaO
Stramba
Stupka
Subertova
Suchanka
Sula
Svejda
Svejkovsky
Svoboda
Tejc
Tikal
Tykal
Till
Timpe
Timpy
Toman
Tomanek
Tomasek
Tomes
Trampotova
Trampota
Treblik
Trnkova
Uerling
Uhlik
Urbanek
Urbanek1
Urbanovska
Urista
Ustohal
Vaca
Vaculova
Vavra
Vejvoda
Veverka
Victor
Vlach
Vlach
Vlasak
Vlasek
Volcik
Voneve
Votke
Vozab
Vrazel
Vykruta
Wykruta
Waclauska
Weichert
Weineltk
Weisener
Wiesner
Wizner
Weiss
Werlla
Whitmire1
Widerlechner
Wilchek
Wondracek
Wood
Zajicek
Zak
Zajicek
Zaruba
Zaruba
Zelinka
Zeman
Zimola
Zipperer
Zitka
Zoucha
Zwolenksy

@ -0,0 +1,297 @@

Aalsburg
Aalst
Aarle
Achteren
Achthoven
Adrichem
Aggelen
Agteren
Agthoven
Akkeren
Aller
Alphen
Alst
Altena
Althuis
Amelsvoort
Amersvoort
Amstel
Andel
Andringa
Ankeren
Antwerp
Antwerpen
Apeldoorn
Arendonk
Asch
Assen
Baarle
Bokhoven
Breda
Bueren
Buggenum
Buiren
Buren
Can
Cann
Canne
Daal
Daalen
Dael
Daele
Dale
Dalen
Laar
Vliert
Akker
Andel
Denend
Aart
Beek
Berg
Hout
Laar
See
Stoep
Veen
Ven
Venn
Venne
Vennen
Zee
Donk
Haanraads
Haanraats
Haanrade
Haanrath
Haenraats
Haenraets
Hanraets
Hassel
Hautem
Hautum
Heel
Herten
Hofwegen
Horn
Hout
Houte
Houtem
Houten
Houttum
Houtum
Kan
Kann
Kanne
Kappel
Karl
Kikkert
Klein
Klerk
Klerken
Klerks
Klerkse
Klerkx
Klerx
Kloet
Kloeten
Kloeter
Koeman
Koemans
Kolen
Kolijn
Kollen
Koning
Kool
Koole
Koolen
Kools
Kouman
Koumans
Krantz
Kranz
Krusen
Kuijpers
Kuiper
Kuipers
Laar
Langbroek
Laren
Lauwens
Lauwers
Leeuwenhoeck
Leeuwenhoek
Leeuwenhoek
Lucas
Lucassen
Lyon
Maas
Maes
Maessen
Marquering
Marqueringh
Marquerink
Mas
Meeuwe
Meeuwes
Meeuwessen
Meeuweszen
Meeuwis
Meeuwissen
Meeuwsen
Meisner
Merckx
Mertens
Michel
Middelburg
Middlesworth
Mohren
Mooren
Mulder
Muyskens
Nagel
Nelissen
Nifterick
Nifterick
Nifterik
Nifterik
Niftrik
Niftrik
Offermans
Ogterop
Ogtrop
Oirschot
Oirschotten
Oomen
Oorschot
Oorschot
Ophoven
Otten
Pander
Panders
Paulis
Paulissen
Peerenboom
Peeters
Peij
Pender
Penders
Pennders
Penner
Penners
Peter
Peusen
Pey
Philips
Prinsen
Rademaker
Rademakers
Ramaaker
Ramaker
Ramakers
Ramecker
Rameckers
Raske
Reijnder
Reijnders
Reinder
Reinders
Reynder
Reynders
Richard
Rietveld
Rijnder
Rijnders
Robert
Roggeveen
Roijacker
Roijackers
Roijakker
Roijakkers
Romeijn
Romeijnders
Romeijnsen
Romijn
Romijnders
Romijnsen
Rompa
Rompa
Rompaeij
Rompaey
Rompaij
Rompay
Rompaye
Rompu
Rompuy
Rooiakker
Rooiakkers
Rooijakker
Rooijakkers
Roosa
Roosevelt
Rossem
Rossum
Rumpade
Rutten
Ryskamp
Samson
Sanna
Schenck
Schermer
Schneider
Schneiders
Schneijder
Schneijders
Schoonenburg
Schoonraad
Schoorel
Schoorel
Schoorl
Schorel
Schrijnemakers
Schuyler
Schwarzenberg
Seeger
Seegers
Seelen
Segers
Segher
Seghers
Severijns
Severins
Sevriens
Silje
Simon
Simonis
Slootmaekers
Smeets
Smets
Smit
Smits
Snaaijer
Snaijer
Sneiders
Sneijder
Sneijders
Sneijer
Sneijers
Snell
Snider
Sniders
Snijder
Snijders
Snyder
Snyders
Specht
Spijker
Spiker
Ter Avest
Teunissen
Theunissen
Tholberg
Tillens
Tunison
Tunneson
Vandale
Vandroogenbroeck
Vann
File diff suppressed because it is too large

@ -0,0 +1,277 @@
|
|||
Abel
Abraham
Adam
Albert
Allard
Archambault
Armistead
Arthur
Augustin
Babineaux
Baudin
Beauchene
Beaulieu
Beaumont
Bélanger
Bellamy
Bellerose
Belrose
Berger
Béringer
Bernard
Bertrand
Bisset
Bissette
Blaise
Blanc
Blanchet
Blanchett
Bonfils
Bonheur
Bonhomme
Bonnaire
Bonnay
Bonner
Bonnet
Borde
Bordelon
Bouchard
Boucher
Brisbois
Brodeur
Bureau
Caron
Cavey
Chaput
Charbonneau
Charpentier
Charron
Chastain
Chevalier
Chevrolet
Cloutier
Colbert
Comtois
Cornett
Coté
Coupe
Courtemanche
Cousineau
Couture
Daniau
D'aramitz
Daviau
David
Deforest
Degarmo
Delacroix
De la fontaine
Deniau
Deniaud
Deniel
Denis
De sauveterre
Deschamps
Descoteaux
Desjardins
Desrochers
Desrosiers
Dubois
Duchamps
Dufort
Dufour
Duguay
Dupond
Dupont
Durand
Durant
Duval
Émile
Eustis
Fabian
Fabre
Fabron
Faucher
Faucheux
Faure
Favager
Favre
Favreau
Fay
Félix
Firmin
Fontaine
Forest
Forestier
Fortier
Foss
Fournier
Gage
Gagne
Gagnier
Gagnon
Garcon
Gardinier
Germain
Géroux
Giles
Girard
Giroux
Glaisyer
Gosse
Gosselin
Granger
Guérin
Guillory
Hardy
Harman
Hébert
Herbert
Herriot
Jacques
Janvier
Jordan
Joubert
Labelle
Lachance
Lachapelle
Lamar
Lambert
Lane
Langlais
Langlois
Lapointe
Larue
Laurent
Lavigne
Lavoie
Leandres
Lebeau
Leblanc
Leclair
Leclerc
Lécuyer
Lefebvre
Lefévre
Lefurgey
Legrand
Lemaire
Lémieux
Leon
Leroy
Lesauvage
Lestrange
Lévêque
Lévesque
Linville
Lyon
Lyon
Maçon
Marchand
Marie
Marion
Martel
Martel
Martin
Masson
Masson
Mathieu
Mercier
Merle
Michaud
Michel
Monet
Monette
Montagne
Moreau
Moulin
Mullins
Noel
Oliver
Olivier
Page
Paget
Palomer
Pan
Pape
Paquet
Paquet
Parent
Paris
Parris
Pascal
Patenaude
Paternoster
Paul
Pelletier
Perrault
Perreault
Perrot
Petit
Pettigrew
Pierre
Plamondon
Plourde
Poingdestre
Poirier
Porcher
Poulin
Proulx
Renaud
Rey
Reyer
Richard
Richelieu
Robert
Roche
Rome
Romilly
Rose
Rousseau
Roux
Roy
Royer
Salomon
Salvage
Samson
Samuel
Sargent
Sarkozi
Sarkozy
Sartre
Sault
Sauvage
Sauvageau
Sauvageon
Sauvageot
Sauveterre
Savatier
Segal
Sergeant
Séverin
Simon
Solomon
Soucy
St martin
St pierre
Tailler
Tasse
Thayer
Thibault
Thomas
Tobias
Tolbert
Traver
Travere
Travers
Traverse
Travert
Tremblay
Tremble
Victor
Victors
Villeneuve
Vincent
Vipond
Voclain
Yount
@@ -0,0 +1,724 @@
Abbing
Abel
Abeln
Abt
Achilles
Achterberg
Acker
Ackermann
Adam
Adenauer
Adler
Adlersflügel
Aeschelman
Albert
Albrecht
Aleshire
Aleshite
Althaus
Amsel
Andres
Armbrüster
Armbruster
Artz
Aue
Auer
Augustin
Aust
Autenburg
Auttenberg
Baasch
Bach
Bachmeier
Bäcker
Bader
Bähr
Bambach
Bauer
Bauers
Baum
Baumann
Baumbach
Baumgärtner
Baumgartner
Baumhauer
Bayer
Beck
Becke
Beckenbauer
Becker
Beckert
Behrend
Behrends
Beitel
Beltz
Benn
Berg
Berger
Bergfalk
Beringer
Bernat
Best
Beutel
Beyer
Beyersdorf
Bieber
Biermann
Bischoffs
Blau
Blecher
Bleier
Blumenthal
Blumstein
Bocker
Boehler
Boer
Boesch
Böhler
Böhm
Böhme
Böhmer
Bohn
Borchard
Bösch
Bosch
Böttcher
Brahms
Brand
Brandt
Brant
Brauer
Braun
Braune
Breiner
Breisacher
Breitbarth
Bretz
Brinkerhoff
Brodbeck
Brose
Brotz
Bruhn
Brun
Brune
Buchholz
Buckholtz
Buhr
Bumgarner
Burgstaller
Busch
Carver
Chevrolet
Cline
Dahl
Denzel
Derrick
Diefenbach
Dieter
Dietrich
Dirchs
Dittmar
Dohman
Drechsler
Dreher
Dreschner
Dresdner
Dressler
Duerr
Dunkle
Dunst
Dürr
Eberhardt
Ebner
Ebner
Eckstein
Egger
Eichel
Eilerts
Engel
Enns
Esser
Essert
Everhart
Fabel
Faerber
Falk
Falkenrath
Färber
Fashingbauer
Faust
Feigenbaum
Feld
Feldt
Fenstermacher
Fertig
Fiedler
Fischer
Flater
Fleischer
Foerstner
Forst
Förstner
Foth
Frank
Franke
Frei
Freud
Freudenberger
Freund
Fried
Friedrich
Fromm
Frost
Fuchs
Fuhrmann
Fürst
Fux
Gabler
Gaertner
Garb
Garber
Gärtner
Garver
Gass
Gehrig
Gehring
Geier
Geiger
Geisler
Geissler
Geiszler
Gensch
Gerber
Gerhard
Gerhardt
Gerig
Gerst
Gerstle
Gerver
Giehl
Giese
Glöckner
Goebel
Goldschmidt
Gorman
Gott
Gotti
Gottlieb
Gottschalk
Graner
Greenberg
Groos
Gros
Gross
Groß
Große
Grosse
Größel
Großel
Großer
Grosser
Grosz
Grünewald
Günther
Gunther
Gutermuth
Gwerder
Haas
Haase
Haber
Habich
Habicht
Hafner
Hahn
Hall
Halle
Harman
Hartmann
Hase
Hasek
Hasenkamp
Hass
Hauer
Haupt
Hausler
Havener
Heidrich
Heinrich
Heinrichs
Heintze
Hellewege
Heppenheimer
Herbert
Hermann
Herrmann
Herschel
Hertz
Hildebrand
Hinrichs
Hintzen
Hirsch
Hoch
Hochberg
Hoefler
Hofer
Hoffman
Hoffmann
Höfler
Hofmann
Hofmeister
Holst
Holtzer
Hölzer
Holzer
Holzknecht
Holzmann
Hoover
Horn
Horn
Horowitz
Houk
Hüber
Huber
Huff
Huffman
Huffmann
Hummel
Hummel
Hutmacher
Ingersleben
Jaeger
Jäger
Jager
Jans
Janson
Janz
Jollenbeck
Jordan
Jund
Jung
Junge
Kahler
Kaiser
Kalb
Kalbfleisch
Kappel
Karl
Kaspar
Kassmeyer
Kästner
Katz
Kaube
Käufer
Kaufer
Kauffmann
Kaufman
Keil
Keller
Kempf
Kerner
Kerper
Kerwar
Kerwer
Kiefer
Kiefer
Kirchner
Kistler
Kistner
Kleid
Klein
Klossner
Knef
Kneib
Kneller
Knepp
Knochenmus
Knopf
Knopp
Koch
Kock
Koenig
Koenigsmann
Köhl
Kohl
Köhler
Kohler
Kolbe
König
Königsmann
Kopp
Kraemer
Krämer
Kramer
Krantz
Kranz
Kraus
Krause
Krauss
Krauß
Krebs
Kröger
Kron
Kruckel
Krüger
Krüger
Kruger
Kruse
Kruse
Küchler
Kuhn
Kundert
Kunkel
Kunkle
Kuntz
Kunze
Kurzmann
Laberenz
Lafrentz
Lafrenz
Landau
Lang
Lange
Langenberg
Langer
Larenz
Laurenz
Lauritz
Lawerenz
Lawrenz
Lehmann
Lehrer
Leitner
Leitz
Leitzke
Lenz
Leverenz
Lewerentz
Lewerenz
Lichtenberg
Lieberenz
Linden
Loewe
Lohrenz
Lorentz
Lorenz
Lorenzen
Loris
Loritz
Löwe
Ludwig
Luther
Maas
Maier
Mandel
Mann
Markwardt
Marquardt
Marquering
Marquerink
Martell
Martin
Martz
Mas
Maurer
Maus
Mayer
Meier
Mein
Meindl
Meinhardt
Meisner
Meissner
Melsbach
Mendel
Mendelsohn
Mendelssohn
Messer
Messerli
Messmann
Messner
Metz
Metz
Metzger
Meyer
Michel
Mohren
Möller
Morgenstern
Moser
Mueller
Muhlfeld
Müller
Nagel
Neuman
Neumann
Nuremberg
Nussbaum
Nussenbaum
Oberst
Oelberg
Ohme
Oliver
Oppenheimer
Ott
Otto
Oursler
Pahlke
Papke
Papp
Paternoster
Paul
Paulis
Pawlitzki
Penzig
Peter
Peters
Pfaff
Pfenning
Plank
Pletcher
Porsche
Portner
Prinz
Protz
Rademacher
Rademaker
Rapp
Raske
Raskob
Raskop
Raskoph
Regenbogen
Reier
Reiher
Reiter
Rettig
Reuter
Reuter
Richard
Richter
Rier
Riese
Ritter
Rose
Rosenberg
Rosenberger
Rosenfeld
Rot
Roth
Rothbauer
Rothenberg
Rothschild
Sachs
Saller
Saller
Salomon
Salzwedel
Samuel
Sander
Sauber
Schäfer
Scheer
Scheinberg
Schenck
Schermer
Schindler
Schirmer
Schlender
Schlimme
Schlusser
Schmeling
Schmid
Schmidt
Schmitt
Schmitz
Schneider
Schnoor
Schnur
Schoettmer
Schräder
Schrader
Schreck
Schreier
Schröder
Schröder
Schroeder
Schroeter
Schröter
Schubert
Schuchard
Schuchardt
Schuchert
Schuhart
Schuhmacher
Schuler
Schult
Schulte
Schultes
Schultheis
Schultheiss
Schultheiß
Schultz
Schultze
Schulz
Schulze
Schumacher
Schuster
Schuttmann
Schwangau
Schwartz
Schwarz
Schwarzenegger
Schwenke
Schwinghammer
Seelenfreund
Seidel
Senft
Senft
Sheinfeld
Shriver
Siegel
Siegel
Siekert
Siemon
Silverstein
Simen
Simmon
Simon
Simons
Siskin
Siskind
Sitz
Sitz
Slusser
Solberg
Sommer
Sommer
Sommer
Sommer
Sonnen
Sorg
Sorge
Spannagel
Specht
Spellmeyer
Spitznogle
Sponaugle
Stark
Stauss
Steen
Steffen
Stein
Steinmann
Stenger
Sternberg
Steube
Steuben
Stieber
Stoppelbein
Stoppelbein
Strand
Straub
Strobel
Strohkirch
Stroman
Stuber
Stueck
Stumpf
Sturm
Suess
Sulzbach
Swango
Switzer
Tangeman
Tanzer
Teufel
Tiedeman
Tifft
Tillens
Tobias
Tolkien
Tresler
Tritten
Trumbauer
Tschida
Unkle
Unruh
Unterbrink
Ursler
Vann
Van tonder
Vieth
Vogel
Vogt
Vogts
Voigt
Voigts
Volk
Voll
Von brandt
Von essen
Von grimmelshausen
Von ingersleben
Vonnegut
Von wegberg
Voss
Voß
Wägner
Wagner
Wähner
Wahner
Waldfogel
Waldvogel
Walkenhorst
Walter
Walther
Waltz
Wang
Warner
Waxweiler
Weber
Wechsler
Wedekind
Weeber
Wegener
Wegner
Wehner
Wehunt
Weigand
Weiman
Weiner
Weiss
Weiß
Welter
Wendel
Wendell
Werner
Wernher
West
Westerberg
Wetterman
Wetzel
Wexler
Wieck
Wiegand
Wildgrube
Winter
Winther
Winther
Wirner
Wirnhier
Wirt
Wirth
Wolf
Wolff
Wolter
Wörner
Wörnhör
Wruck
Wyman
Xylander
Zellweger
Zilberschlag
Zimmerman
Zimmermann
@@ -0,0 +1,203 @@
Adamidis
Adamou
Agelakos
Akrivopoulos
Alexandropoulos
Anetakis
Angelopoulos
Antimisiaris
Antipas
Antonakos
Antoniadis
Antonopoulos
Antonopoulos
Antonopoulos
Arvanitoyannis
Avgerinos
Banos
Batsakis
Bekyros
Belesis
Bertsimas
Bilias
Blades
Bouloukos
Brisimitzakis
Bursinos
Calogerakis
Calpis
Chellos
Christakos
Christodoulou
Christou
Chrysanthopoulos
Chrysanthopoulos
Comino
Close
Close
Close
Close
Close
Close
Close
Close
Dalianis
Danas
Dasios
Demakis
Demarchis
Demas
Demetrious
Dertilis
Diakogeorgiou
Dioletis
Dounias
Dritsas
Drivakis
Eatros
Egonidis
Eliopoulos
Forakis
Fotopoulos
Fourakis
Frangopoulos
Galanopoulos
Garofalis
Gavril
Gavrilopoulos
Georgeakopoulos
Geracimos
Gianakopulos
Giannakopoulos
Giannakos
Glynatsis
Gomatos
Grammatakakis
Gravari
Hadjiyianakies
Hagias
Haritopoulos
Honjas
Horiatis
Houlis
Jamussa
Kaglantge
Kalakos
Kalogeria
Kaloxylos
Kanavos
Kapsimalles
Karahalios
Karameros
Karkampasis
Karnoupakis
Katsourinis
Kefalas
Kokkali
Kokoris
Kolovos
Konstantatos
Kosmas
Kotsilimbas
Kotsiopoulos
Kouches
Koulaxizis
Koumanidis
Kourempes
Kouretas
Kouropoulos
Kouros
Koustoubos
Koutsoubos
Kreskas
Kringos
Kyritsis
Laganas
Leontarakis
Letsos
Liatos
Lillis
Lolos
Louverdis
Makricosta
Malihoudis
Maneates
Manos
Manoukarakis
Matsoukis
Mentis
Mersinias
Metrofanis
Michalaras
Milionis
Missiakos
Moraitopoulos
Nikolaou
Nomikos
Paitakes
Paloumbas
Panayiotopoulos
Panoulias
Pantelakos
Pantelas
Papadelias
Papadopulos
Papageorge
Papoutsis
Pappayiorgas
Paraskevopoulos
Paraskos
Paschalis
Patrianakos
Patselas
Pefanis
Petimezas
Petrakis
Pezos
Phocas
Pispinis
Polites
Polymenakou
Poniros
Protopsaltis
Rallis
Rigatos
Rorris
Rousses
Ruvelas
Sakelaris
Sakellariou
Samios
Sardelis
Sfakianos
Sklavenitis
Sortras
Sotiris
Spyridis
Stamatas
Stamatelos
Stavropoulos
Strilakos
Stroggylis
Tableriou
Taflambas
Tassioglou
Telis
Tsoumada
Theofilopoulos
Theohari
Totolos
Tourna
Tsahalis
Tsangaris
Tselios
Tsogas
Vamvakidis
Varvitsiotes
Vassilikos
Vassilopulos
Vlahos
Vourlis
Xydis
Zaloumi
Zouvelekis
@@ -0,0 +1,232 @@
Adam
Ahearn
Aodh
Aodha
Aonghuis
Aonghus
Bhrighde
Bradach
Bradan
Braden
Brady
Bran
Brannon
Brian
Callaghan
Caomh
Carey
Casey
Cassidy
Cathain
Cathan
Cathasach
Ceallach
Ceallachan
Cearbhall
Cennetig
Ciardha
Clark
Cleirich
Cleirigh
Cnaimhin
Coghlan
Coilean
Collins
Colman
Conall
Conchobhar
Conn
Connell
Connolly
Cormac
Corraidhin
Cuidightheach
Curran
Dúbhshlaine
Dalach
Daly
Damhain
Damhan
Delaney
Desmond
Devin
Diarmaid
Doherty
Domhnall
Donnchadh
Donndubhan
Donnell
Donoghue
Donovan
Doyle
Dubhain
Dubhan
Duncan
Eoghan
Eoin
Eoin
Faolan
Farrell
Fearghal
Fergus
Finn
Finnegan
Fionn
Flanagan
Flann
Flynn
Gallchobhar
Gerald
Giolla
Gorman
Hayden
Ivor
John
Kavanagh
Keefe
Kelly
Kennedy
Lennon
Login
Macclelland
Macdermott
Maceachthighearna
Macfarland
Macghabhann
Maciomhair
Macshuibhne
Madaidhin
Madden
Maguire
Mahoney
Maille
Malone
Manus
Maolmhuaidh
Mathghamhain
Maurice
Mcguire
Mckay
Mclain
Mcmahon
Mcnab
Mcneil
Meadhra
Michael
Milligan
Mochan
Mohan
Molloy
Monahan
Mooney
Muirchertach
Mullen
Mulryan
Murchadh
Murphy
Names
Naoimhin
Naomhan
Neil
Neville
Nevin
Niadh
Niall
Nolan
Nuallan
O'Boyle
O'Brien
O'Byrne
O'Donnell
O'Hannagain
O'Hannigain
O'Keefe
O'Mooney
O'Neal
O'Boyle
O'Bree
O'Brian
O'Brien
O'Callaghann
O'Connell
O'Connor
O'Dell
O'Doherty
O'Donnell
O'Donoghue
O'Dowd
O'Driscoll
O'Gorman
O'Grady
O'Hagan
O'Halloran
O'Hanlon
O'Hara
O'Hare
O'Kane
O'Keefe
O'Keeffe
O'Kelly
O'Leary
O'Loughlin
O'Mahoney
O'Mahony
O'Malley
O'Meara
O'Neal
O'Neill
O'Reilly
O'Rourke
O'Ryan
O'Shea
O'Sullivan
O'Toole
Patrick
Peatain
Pharlain
Power
Quigley
Quinn
Quirke
Raghailligh
Reagan
Register
Reilly
Reynold
Rhys
Riagain
Riagan
Riain
Rian
Rinn
Roach
Rodagh
Rory
Ruadh
Ruadhain
Ruadhan
Ruaidh
Samuel
Scolaidhe
Seaghdha
Sechnall
Seighin
Shannon
Sheehy
Simon
Sioda
Sloan
Sluaghadhan
Suaird
Sullivan
Tadhg
Tadhgan
Taidhg
Teagan
Teague
Tighearnach
Tracey
Treasach
Whalen
Whelan
William
@@ -0,0 +1,709 @@
Abandonato
Abatangelo
Abatantuono
Abate
Abategiovanni
Abatescianni
Abbà
Abbadelli
Abbascia
Abbatangelo
Abbatantuono
Abbate
Abbatelli
Abbaticchio
Abbiati
Abbracciabene
Abbracciabeni
Abelli
Abelló
Abrami
Abramo
Acardi
Accardi
Accardo
Acciai
Acciaio
Acciaioli
Acconci
Acconcio
Accorsi
Accorso
Accosi
Accursio
Acerbi
Acone
Aconi
Acqua
Acquafredda
Acquarone
Acquati
Adalardi
Adami
Adamo
Adamoli
Addario
Adelardi
Adessi
Adimari
Adriatico
Affini
Africani
Africano
Agani
Aggi
Aggio
Agli
Agnelli
Agnellutti
Agnusdei
Agosti
Agostini
Agresta
Agrioli
Aiello
Aiolfi
Airaldi
Airò
Aita
Ajello
Alagona
Alamanni
Albanesi
Albani
Albano
Alberghi
Alberghini
Alberici
Alberighi
Albero
Albini
Albricci
Albrici
Alcheri
Aldebrandi
Alderisi
Alduino
Alemagna
Aleppo
Alesci
Alescio
Alesi
Alesini
Alesio
Alessandri
Alessi
Alfero
Aliberti
Alinari
Aliprandi
Allegri
Allegro
Alò
Aloia
Aloisi
Altamura
Altimari
Altoviti
Alunni
Amadei
Amadori
Amalberti
Amantea
Amato
Amatore
Ambrogi
Ambrosi
Amello
Amerighi
Amoretto
Angioli
Ansaldi
Anselmetti
Anselmi
Antonelli
Antonini
Antonino
Aquila
Aquino
Arbore
Ardiccioni
Ardizzone
Ardovini
Arena
Aringheri
Arlotti
Armani
Armati
Armonni
Arnolfi
Arnoni
Arrighetti
Arrighi
Arrigucci
Aucciello
Azzarà
Baggi
Baggio
Baglio
Bagni
Bagnoli
Balboni
Baldi
Baldini
Baldinotti
Baldovini
Bandini
Bandoni
Barbieri
Barone
Barsetti
Bartalotti
Bartolomei
Bartolomeo
Barzetti
Basile
Bassanelli
Bassani
Bassi
Basso
Basurto
Battaglia
Bazzoli
Bellandi
Bellandini
Bellincioni
Bellini
Bello
Bellomi
Belloni
Belluomi
Belmonte
Bencivenni
Benedetti
Benenati
Benetton
Benini
Benivieni
Benvenuti
Berardi
Bergamaschi
Berti
Bertolini
Biancardi
Bianchi
Bicchieri
Biondi
Biondo
Boerio
Bologna
Bondesan
Bonomo
Borghi
Borgnino
Borgogni
Bosco
Bove
Bovér
Boveri
Brambani
Brambilla
Breda
Brioschi
Brivio
Brunetti
Bruno
Buffone
Bulgarelli
Bulgari
Buonarroti
Busto
Caiazzo
Caito
Caivano
Calabrese
Calligaris
Campana
Campo
Cantu
Capello
Capello
Capello
Capitani
Carbone
Carboni
Carideo
Carlevaro
Caro
Carracci
Carrara
Caruso
Cassano
Castro
Catalano
Cattaneo
Cavalcante
Cavallo
Cingolani
Cino
Cipriani
Cisternino
Coiro
Cola
Colombera
Colombo
Columbo
Como
Como
Confortola
Conti
Corna
Corti
Corvi
Costa
Costantini
Costanzo
Cracchiolo
Cremaschi
Cremona
Cremonesi
Crespo
Croce
Crocetti
Cucinotta
Cuocco
Cuoco
D'ambrosio
Damiani
D'amore
D'angelo
D'antonio
De angelis
De campo
De felice
De filippis
De fiore
De laurentis
De luca
De palma
De rege
De santis
De vitis
Di antonio
Di caprio
Di mercurio
Dinapoli
Dioli
Di pasqua
Di pietro
Di stefano
Donati
D'onofrio
Drago
Durante
Elena
Episcopo
Ermacora
Esposito
Evangelista
Fabbri
Fabbro
Falco
Faraldo
Farina
Farro
Fattore
Fausti
Fava
Favero
Fermi
Ferrara
Ferrari
Ferraro
Ferrero
Ferro
Fierro
Filippi
Fini
Fiore
Fiscella
Fiscella
Fonda
Fontana
Fortunato
Franco
Franzese
Furlan
Gabrielli
Gagliardi
Gallo
Ganza
Garfagnini
Garofalo
Gaspari
Gatti
Genovese
Gentile
Germano
Giannino
Gimondi
Giordano
Gismondi
Giùgovaz
Giunta
Goretti
Gori
Greco
Grillo
Grimaldi
Gronchi
Guarneri
Guerra
Guerriero
Guidi
Guttuso
Idoni
Innocenti
Labriola
Làconi
Laganà
Lagomarsìno
Lagorio
Laguardia
Lama
Lamberti
Lamon
Landi
Lando
Landolfi
Laterza
Laurito
Lazzari
Lecce
Leccese
Leggièri
Lèmmi
Leone
Leoni
Lippi
Locatelli
Lombardi
Longo
Lupo
Luzzatto
Maestri
Magro
Mancini
Manco
Mancuso
Manfredi
Manfredonia
Mantovani
Marchegiano
Marchesi
Marchetti
Marchioni
Marconi
Mari
Maria
Mariani
Marino
Marmo
Martelli
Martinelli
Masi
Masin
Mazza
Merlo
Messana
Micheli
Milani
Milano
Modugno
Mondadori
Mondo
Montagna
Montana
Montanari
Monte
Monti
Morandi
Morello
Moretti
Morra
Moschella
Mosconi
Motta
Muggia
Muraro
Murgia
Murtas
Nacar
Naggi
Naggia
Naldi
Nana
Nani
Nanni
Nannini
Napoleoni
Napoletani
Napoliello
Nardi
Nardo
Nardovino
Nasato
Nascimbene
Nascimbeni
Natale
Nave
Nazario
Necchi
Negri
Negrini
Nelli
Nenci
Nepi
Neri
Neroni
Nervetti
Nervi
Nespola
Nicastro
Nicchi
Nicodemo
Nicolai
Nicolosi
Nicosia
Nicotera
Nieddu
Nieri
Nigro
Nisi
Nizzola
Noschese
Notaro
Notoriano
Oberti
Oberto
Ongaro
Orlando
Orsini
Pace
Padovan
Padovano
Pagani
Pagano
Palladino
Palmisano
Palumbo
Panzavecchia
Parisi
Parma
Parodi
Parri
Parrino
Passerini
Pastore
Paternoster
Pavesi
Pavone
Pavoni
Pecora
Pedrotti
Pellegrino
Perugia
Pesaresi
Pesaro
Pesce
Petri
Pherigo
Piazza
Piccirillo
Piccoli
Pierno
Pietri
Pini
Piovene
Piraino
Pisani
Pittaluga
Poggi
Poggio
Poletti
Pontecorvo
Portelli
Porto
Portoghese
Potenza
Pozzi
Profeta
Prosdocimi
Provenza
Provenzano
Pugliese
Quaranta
Quattrocchi
Ragno
Raimondi
Rais
Rana
Raneri
Rao
Rapallino
Ratti
Ravenna
Ré
Ricchetti
Ricci
Riggi
Righi
Rinaldi
Riva
Rizzo
Robustelli
Rocca
Rocchi
Rocco
Roma
Roma
Romagna
Romagnoli
Romano
Romano
Romero
Roncalli
Ronchi
Rosa
Rossi
Rossini
Rotolo
Rovigatti
Ruggeri
Russo
Rustici
Ruzzier
Sabbadin
Sacco
Sala
Salomon
Salucci
Salvaggi
Salvai
Salvail
Salvatici
Salvay
Sanna
Sansone
Santini
Santoro
Sapienti
Sarno
Sarti
Sartini
Sarto
Savona
Scarpa
Scarsi
Scavo
Sciacca
Sciacchitano
Sciarra
Scordato
Scotti
Scutese
Sebastiani
Sebastino
Segreti
Selmone
Selvaggio
Serafin
Serafini
Serpico
Sessa
Sgro
Siena
Silvestri
Sinagra
Sinagra
Soldati
Somma
Sordi
Soriano
Sorrentino
Spada
Spanò
Sparacello
Speziale
Spini
Stabile
Stablum
Stilo
Sultana
Tafani
Tamàro
Tamboia
Tanzi
Tarantino
Taverna
Tedesco
Terranova
Terzi
Tessaro
Testa
Tiraboschi
Tivoli
Todaro
Toloni
Tornincasa
Toselli
Tosetti
Tosi
Tosto
Trapani
Traversa
Traversi
Traversini
Traverso
Trucco
Trudu
Tumicelli
Turati
Turchi
Uberti
Uccello
Uggeri
Ughi
Ungaretti
Ungaro
Vacca
Vaccaro
Valenti
Valentini
Valerio
Varano
Ventimiglia
Ventura
Verona
Veronesi
Vescovi
Vespa
Vestri
Vicario
Vico
Vigo
Villa
Vinci
Vinci
Viola
Vitali
Viteri
Voltolini
Zambrano
Zanetti
Zangari
Zappa
Zeni
Zini
Zino
Zunino
@@ -0,0 +1,991 @@
Abe
Abukara
Adachi
Aida
Aihara
Aizawa
Ajibana
Akaike
Akamatsu
Akatsuka
Akechi
Akera
Akimoto
Akita
Akiyama
Akutagawa
Amagawa
Amaya
Amori
Anami
Ando
Anzai
Aoki
Arai
Arakawa
Araki
Arakida
Arato
Arihyoshi
Arishima
Arita
Ariwa
Ariwara
Asahara
Asahi
Asai
Asano
Asanuma
Asari
Ashia
Ashida
Ashikaga
Asuhara
Atshushi
Ayabito
Ayugai
Baba
Baisotei
Bando
Bunya
Chiba
Chikamatsu
Chikanatsu
Chino
Chishu
Choshi
Daishi
Dan
Date
Dazai
Deguchi
Deushi
Doi
Ebina
Ebisawa
Eda
Egami
Eguchi
Ekiguchi
Endo
Endoso
Enoki
Enomoto
Erizawa
Eto
Etsuko
Ezakiya
Fuchida
Fugunaga
Fujikage
Fujimaki
Fujimoto
Fujioka
Fujishima
Fujita
Fujiwara
Fukao
Fukayama
Fukuda
Fukumitsu
Fukunaka
Fukuoka
Fukusaku
Fukushima
Fukuyama
Fukuzawa
Fumihiko
Funabashi
Funaki
Funakoshi
Furusawa
Fuschida
Fuse
Futabatei
Fuwa
Gakusha
Genda
Genji
Gensai
Godo
Goto
Gushiken
Hachirobei
Haga
Hagino
Hagiwara
Hama
Hamacho
Hamada
Hamaguchi
Hamamoto
Hanabusa
Hanari
Handa
Hara
Harada
Haruguchi
Hasegawa
Hasekura
Hashimoto
Hasimoto
Hatakeda
Hatakeyama
Hatayama
Hatoyama
Hattori
Hayakawa
Hayami
Hayashi
Hayashida
Hayata
Hayuata
Hida
Hideaki
Hideki
Hideyoshi
Higashikuni
Higashiyama
Higo
Higoshi
Higuchi
Hike
Hino
Hira
Hiraga
Hiraki
Hirano
Hiranuma
Hiraoka
Hirase
Hirasi
Hirata
Hiratasuka
Hirayama
Hiro
Hirose
Hirota
Hiroyuki
Hisamatsu
Hishida
Hishikawa
Hitomi
Hiyama
Hohki
Hojo
Hokusai
Honami
Honda
Hori
Horigome
Horigoshi
Horiuchi
Horri
Hoshino
Hosokawa
Hosokaya
Hotate
Hotta
Hyata
Hyobanshi
Ibi
Ibu
Ibuka
Ichigawa
Ichihara
Ichikawa
Ichimonji
Ichiro
Ichisada
Ichiyusai
Idane
Iemochi
Ienari
Iesada
Ieyasu
Ieyoshi
Igarashi
Ihara
Ii
Iida
Iijima
Iitaka
Ijichi
Ijiri
Ikeda
Ikina
Ikoma
Imada
Imagawa
Imai
Imaizumi
Imamura
Imoo
Ina
Inaba
Inao
Inihara
Ino
Inoguchi
Inokuma
Inoue
Inouye
Inukai
Ippitsusai
Irie
Iriye
Isayama
Ise
Iseki
Iseya
Ishibashi
Ishida
Ishiguro
Ishihara
Ishikawa
Ishimaru
Ishimura
Ishinomori
Ishiyama
Isobe
Isoda
Isozaki
Itagaki
Itami
Ito
Itoh
Iwahara
Iwahashi
Iwakura
Iwasa
Iwasaki
Izumi
Jimbo
Jippensha
Jo
Joshuya
Joshuyo
Jukodo
Jumonji
Kada
Kagabu
Kagawa
Kahae
Kahaya
Kaibara
Kaima
Kajahara
Kajitani
Kajiwara
Kajiyama
Kakinomoto
Kakutama
Kamachi
Kamata
Kaminaga
Kamio
Kamioka
Kamisaka
Kamo
Kamon
Kan
Kanada
Kanagaki
Kanegawa
Kaneko
Kanesaka
Kano
Karamorita
Karube
Karubo
Kasahara
Kasai
Kasamatsu
Kasaya
Kase
Kashiwagi
Kasuse
Kataoka
Katayama
Katayanagi
Kate
Kato
Katoaka
Katsu
Katsukawa
Katsumata
Katsura
Katsushika
Kawabata
Kawachi
Kawagichi
Kawagishi
Kawaguchi
Kawai
Kawaii
Kawakami
Kawamata
Kawamura
Kawasaki
Kawasawa
Kawashima
Kawasie
Kawatake
Kawate
Kawayama
Kawazu
Kaza
Kazuyoshi
Kenkyusha
Kenmotsu
Kentaro
Ki
Kido
Kihara
Kijimuta
Kijmuta
Kikkawa
Kikuchi
Kikugawa
Kikui
Kikutake
Kimio
Kimiyama
Kimura
Kinashita
Kinoshita
Kinugasa
Kira
Kishi
Kiski
Kita
Kitabatake
Kitagawa
Kitamura
Kitano
Kitao
Kitoaji
Ko
Kobayashi
Kobi
Kodama
Koga
Kogara
Kogo
Koguchi
Koiso
Koizumi
Kojima
Kokan
Komagata
Komatsu
Komatsuzaki
Komine
Komiya
Komon
Komura
Kon
Konae
Konda
Kondo
Konishi
Kono
Konoe
Koruba
Koshin
Kotara
Kotoku
Koyama
Koyanagi
Kozu
Kubo
Kubota
Kudara
Kudo
Kuga
Kumagae
Kumasaka
Kunda
Kunikida
Kunisada
Kuno
Kunomasu
Kuramochi
Kuramoto
Kurata
Kurkawa
Kurmochi
Kuroda
Kurofuji
Kurogane
Kurohiko
Kuroki
Kurosawa
Kurusu
Kusatsu
Kusonoki
Kusuhara
Kusunoki
Kuwabara
Kwakami
Kyubei
Maeda
Maehata
Maeno
Maita
Makiguchi
Makino
Makioka
Makuda
Marubeni
Marugo
Marusa
Maruya
Maruyama
Masanobu
Masaoka
Mashita
Masoni
Masudu
Masuko
Masuno
Masuzoe
Matano
Matokai
Matoke
Matsuda
Matsukata
Matsuki
Matsumara
Matsumoto
Matsumura
Matsuo
Matsuoka
Matsura
Matsushina
Matsushita
Matsuya
Matsuzawa
Mayuzumi
Mazaki
Mazawa
Mazuka
Mifune
Mihashi
Miki
Mimasuya
Minabuchi
Minami
Minamoto
Minatoya
Minobe
Mishima
Mitsubishi
Mitsuharu
Mitsui
Mitsukuri
Mitsuwa
Mitsuya
Mitzusaka
Miura
Miwa
Miyagi
Miyahara
Miyajima
Miyake
Miyamae
Miyamoto
Miyazaki
Miyazawa
Miyoshi
Mizoguchi
Mizumaki
Mizuno
Mizutani
Modegi
Momotami
Momotani
Monomonoi
Mori
Moriguchi
Morimoto
Morinaga
Morioka
Morishita
Morisue
Morita
Morri
Moto
Motoori
Motoyoshi
Munakata
Munkata
Muraguchi
Murakami
Muraoka
Murasaki
Murase
Murata
Murkami
Muro
Muruyama
Mushanaokoji
Mushashibo
Muso
Mutsu
Nagahama
Nagai
Nagano
Nagasawa
Nagase
Nagata
Nagatsuka
Nagumo
Naito
Nakada
Nakadai
Nakadan
Nakae
Nakagawa
Nakahara
Nakajima
Nakamoto
Nakamura
Nakane
Nakanishi
Nakano
Nakanoi
Nakao
Nakasato
Nakasawa
Nakasone
Nakata
Nakatoni
Nakayama
Nakazawa
Namiki
Nanami
Narahashi
Narato
Narita
Nataga
Natsume
Nawabe
Nemoto
Niijima
Nijo
Ninomiya
Nishi
Nishihara
Nishikawa
Nishimoto
Nishimura
Nishimuraya
Nishio
Nishiwaki
Nitta
Nobunaga
Noda
Nogi
Noguchi
Nogushi
Nomura
Nonomura
Noro
Nosaka
Nose
Nozaki
Nozara
Numajiri
Numata
Obata
Obinata
Obuchi
Ochiai
Ochida
Odaka
Ogata
Ogiwara
Ogura
Ogyu
Ohba
Ohira
Ohishi
Ohka
Ohmae
Ohmiya
Oichi
Oinuma
Oishi
Okabe
Okada
Okakura
Okamoto
Okamura
Okanao
Okanaya
Okano
Okasawa
Okawa
Okazaki
Okazawaya
Okimasa
Okimoto
Okita
Okubo
Okuda
Okui
Okuma
Okuma
Okumura
Okura
Omori
Omura
Onishi
Ono
Onoda
Onoe
Onohara
Ooka
Osagawa
Osaragi
Oshima
Oshin
Ota
Otaka
Otake
Otani
Otomo
Otsu
Otsuka
Ouchi
Oyama
Ozaki
Ozawa
Ozu
Raikatuji
Royama
Ryusaki
Sada
Saeki
Saga
Saigo
Saiki
Saionji
Saito
Saitoh
Saji
Sakagami
Sakai
Sakakibara
Sakamoto
Sakanoue
Sakata
Sakiyurai
Sakoda
Sakubara
Sakuraba
Sakurai
Sammiya
Sanda
Sanjo
Sano
Santo
Saromi
Sarumara
Sasada
Sasakawa
Sasaki
Sassa
Satake
Sato
Satoh
Satoya
Sawamatsu
Sawamura
Sayuki
Segawa
Sekigawa
Sekine
Sekozawa
Sen
Senmatsu
Seo
Serizawa
Shiba
Shibaguchi
Shibanuma
Shibasaki
Shibasawa
Shibata
Shibukji
Shichirobei
Shidehara
Shiga
Shiganori
Shige
Shigeki
Shigemitsu
Shigi
Shikitei
Shikuk
Shima
Shimada
Shimakage
Shimamura
Shimanouchi
Shimaoka
Shimazaki
Shimazu
Shimedzu
Shimizu
Shimohira
Shimon
Shimura
Shimuzu
Shinko
Shinozaki
Shinozuka
Shintaro
Shiokawa
Shiomi
Shiomiya
Shionoya
Shiotani
Shioya
Shirahata
Shirai
Shiraishi
Shirane
Shirasu
Shiratori
Shirokawa
Shiroyama
Shiskikura
Shizuma
Shobo
Shoda
Shunji
Shunsen
Siagyo
Soga
Sohda
Soho
Soma
Someya
Sone
Sonoda
Soseki
Sotomura
Suenami
Sugai
Sugase
Sugawara
Sugihara
Sugimura
Sugisata
Sugita
Sugitani
Sugiyama
Sumitimo
Sunada
Suzambo
Suzuki
Tabuchi
Tadeshi
Tagawa
Taguchi
Taira
Taka
Takabe
Takagaki
Takagawa
Takagi
Takahama
Takahashi
Takaki
Takamura
Takano
Takaoka
Takara
Takarabe
Takashi
Takashita
Takasu
Takasugi
Takayama
Takecare
Takeda
Takei
Takekawa
Takemago
Takemitsu
Takemura
Takenouchi
Takeshita
Taketomo
Takeuchi
Takewaki
Takimoto
Takishida
Takishita
Takizawa
Taku
Takudo
Takudome
Tamazaki
Tamura
Tamuro
Tanaka
Tange
Tani
Taniguchi
Tanizaki
Tankoshitsu
Tansho
Tanuma
Tarumi
Tatenaka
Tatsuko
Tatsuno
Tatsuya
Tawaraya
Tayama
Temko
Tenshin
Terada
Terajima
Terakado
Terauchi
Teshigahara
Teshima
Tochikura
Togo
Tojo
Tokaji
Tokuda
Tokudome
Tokuoka
Tomika
Tomimoto
Tomioka
Tommii
Tomonaga
Tomori
Tono
Torii
Torisei
Toru
Toshishai
Toshitala
Toshusai
Toyama
Toyoda
Toyoshima
Toyota
Toyotomi
Tsubouchi
Tsucgimoto
Tsuchie
Tsuda
Tsuji
Tsujimoto
Tsujimura
Tsukada
Tsukade
Tsukahara
Tsukamoto
Tsukatani
Tsukawaki
Tsukehara
Tsukioka
Tsumemasa
Tsumura
Tsunoda
Tsurimi
Tsuruga
Tsuruya
Tsushima
Tsutaya
Tsutomu
Uboshita
Uchida
Uchiyama
Ueda
Uehara
Uemura
Ueshima
Uesugi
Uetake
Ugaki
Ui
Ukiyo
Umari
Umehara
Umeki
Uno
Uoya
Urogataya
Usami
Ushiba
Utagawa
Wakai
Wakatsuki
Watabe
Watanabe
Watari
Watnabe
Watoga
Yakuta
Yamabe
Yamada
Yamagata
Yamaguchi
Yamaguchiya
Yamaha
Yamahata
Yamakage
Yamakawa
Yamakazi
Yamamoto
Yamamura
Yamana
Yamanaka
Yamanouchi
Yamanoue
Yamaoka
Yamashita
Yamato
Yamawaki
Yamazaki
Yamhata
Yamura
Yanagawa
Yanagi
Yanagimoto
Yanagita
Yano
Yasuda
Yasuhiro
Yasui
Yasujiro
Yasukawa
Yasutake
Yoemon
Yokokawa
Yokoyama
Yonai
Yosano
Yoshida
Yoshifumi
Yoshihara
Yoshikawa
Yoshimatsu
Yoshinobu
Yoshioka
Yoshitomi
Yoshizaki
Yoshizawa
Yuasa
Yuhara
Yunokawa
@@ -0,0 +1,94 @@
Ahn
Baik
Bang
Byon
Cha
Chang
Chi
Chin
Cho
Choe
Choi
Chong
Chou
Chu
Chun
Chung
Chweh
Gil
Gu
Gwang
Ha
Han
Ho
Hong
Hung
Hwang
Hyun
Jang
Jeon
Jeong
Jo
Jon
Jong
Jung
Kang
Kim
Ko
Koo
Ku
Kwak
Kwang
Lee
Li
Lim
Ma
Mo
Moon
Nam
Ngai
Noh
Oh
Pae
Pak
Park
Ra
Rhee
Rheem
Ri
Rim
Ron
Ryom
Ryoo
Ryu
San
Seo
Seok
Shim
Shin
Shon
Si
Sin
So
Son
Song
Sook
Suh
Suk
Sun
Sung
Tsai
Wang
Woo
Yang
Yeo
Yeon
Yi
Yim
Yoo
Yoon
You
Youj
Youn
Yu
Yun
@@ -0,0 +1,139 @@
Adamczak
Adamczyk
Andrysiak
Auttenberg
Bartosz
Bernard
Bobienski
Bosko
Broż
Brzezicki
Budny
Bukoski
Bukowski
Chlebek
Chmiel
Czajka
Czajkowski
Dubanowski
Dubicki
Dunajski
Dziedzic
Fabian
Filipek
Filipowski
Gajos
Gniewek
Gomolka
Gomulka
Gorecki
Górka
Górski
Grzeskiewicz
Gwozdek
Jagoda
Janda
Janowski
Jaskolski
Jaskulski
Jedynak
Jelen
Jez
Jordan
Kaczka
Kaluza
Kamiński
Kasprzak
Kava
Kedzierski
Kijek
Klimek
Kosmatka
Kowalczyk
Kowalski
Koziol
Kozlow
Kozlowski
Krakowski
Król
Kumiega
Lawniczak
Lis
Majewski
Malinowski
Maly
Marek
Marszałek
Maslanka
Mencher
Miazga
Michel
Mikolajczak
Mozdzierz
Niemczyk
Niemec
Nosek
Nowak
Pakulski
Pasternack
Pasternak
Paszek
Piatek
Piontek
Pokorny
Poplawski
Róg
Rudaski
Rudawski
Rusnak
Rutkowski
Sadowski
Salomon
Serafin
Sienkiewicz
Sierzant
Sitko
Skala
Slaski
Ślązak
Ślusarczyk
Ślusarski
Smolák
Sniegowski
Sobol
Sokal
Sokolof
Sokoloff
Sokolofsky
Sokolowski
Sokolsky
Sówka
Stanek
Starek
Stawski
Stolarz
Szczepanski
Szewc
Szwarc
Szweda
Szwedko
Walentowicz
Warszawski
Wawrzaszek
Wiater
Winograd
Winogrodzki
Wojda
Wojewódka
Wojewódzki
Wronski
Wyrick
Wyrzyk
Zabek
Zawisza
Zdunowski
Zdunowski
Zielinski
Ziemniak
Zientek
Żuraw
@@ -0,0 +1,74 @@
Abreu
Albuquerque
Almeida
Alves
Araújo
Araullo
Barros
Basurto
Belo
Cabral
Campos
Cardozo
Castro
Coelho
Costa
Crespo
Cruz
D'cruz
D'cruze
Delgado
De santigo
Duarte
Estéves
Fernandes
Ferreira
Ferreiro
Ferro
Fonseca
Franco
Freitas
Garcia
Gaspar
Gomes
Gouveia
Guerra
Henriques
Lobo
Machado
Madeira
Magalhães
Maria
Mata
Mateus
Matos
Medeiros
Melo
Mendes
Moreno
Nunes
Palmeiro
Paredes
Pereira
Pinheiro
Pinho
Ramires
Ribeiro
Rios
Rocha
Rodrigues
Romão
Rosario
Salazar
Santana
Santiago
Santos
Serafim
Silva
Silveira
Simões
Soares
Souza
Torres
Vargas
Ventura
Some files were not shown because too many files have changed in this diff