Skip to content

Commit bdc0176

Browse files
authored
Merge pull request #6 from ehanson8/python3
Python 3 conversion
2 parents 333a5ec + 79799d8 commit bdc0176

22 files changed

+2143
-2142
lines changed

.gitignore

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -68,3 +68,5 @@ local/*
6868
.profile
6969
*.csv
7070
*.json
71+
Pipfile
72+
Pipfile.lock

checkInventory.py

Lines changed: 52 additions & 52 deletions
Original file line numberDiff line numberDiff line change
@@ -1,52 +1,52 @@
1-
import argparse
2-
import pandas as pd
3-
import os
4-
5-
6-
def main():
7-
# begin: argument parsing
8-
parser = argparse.ArgumentParser()
9-
10-
parser.add_argument('-i', '--inventory', required=True,
11-
help='csv file containing the inventory. the path, if given, can be absolute or relative to this script')
12-
13-
parser.add_argument('-d', '--dataDir',
14-
help='directory containing the data. if omitted, data will be read from the directory containing the inventory file')
15-
16-
parser.add_argument('-f', '--field',
17-
help='field in the csv containing the fileNames. default: name')
18-
19-
parser.add_argument('-v', '--verbose', action='store_true',
20-
help='increase output verbosity')
21-
22-
args = parser.parse_args()
23-
24-
if not args.dataDir:
25-
(args.dataDir, null) = os.path.split(args.inventory)
26-
27-
if not args.field:
28-
args.field = 'name'
29-
30-
if args.verbose:
31-
print('verbosity turned on')
32-
print('reading inventory from {}'.format(args.inventory))
33-
print('fileNames read from field named {}'.format(args.field))
34-
print('searching for files in {}'.format(args.dataDir))
35-
# end: argument parsing
36-
37-
inventory = pd.read_csv(args.inventory, usecols=[args.field])
38-
fileNames = inventory[args.field]
39-
foundfiles = 0
40-
missingfiles = 0
41-
for fileName in fileNames:
42-
if os.path.isfile(args.dataDir + '/' + fileName):
43-
if args.verbose: print('{} is not missing'.format(fileName))
44-
foundfiles += 1
45-
else:
46-
print('{} is missing'.format(fileName))
47-
missingfiles += 1
48-
49-
print('{} files found and {} files missing'.format(foundfiles, missingfiles))
50-
51-
52-
if __name__ == "__main__": main()
import argparse
import os

import pandas as pd


def main():
    """Check that every file listed in a CSV inventory exists on disk.

    Reads file names from the chosen column of the inventory CSV and
    reports, per file, whether it is present in the data directory.

    Returns:
        tuple[int, int]: (found, missing) file counts.
    """
    # begin: argument parsing
    parser = argparse.ArgumentParser()

    parser.add_argument('-i', '--inventory', required=True,
                        help='csv file containing the inventory. the path, if given, can be absolute or relative to this script')

    parser.add_argument('-d', '--dataDir',
                        help='directory containing the data. if omitted, data will be read from the directory containing the inventory file')

    parser.add_argument('-f', '--field',
                        help='field in the csv containing the fileNames. default: name')

    parser.add_argument('-v', '--verbose', action='store_true',
                        help='increase output verbosity')

    args = parser.parse_args()

    if not args.dataDir:
        # BUG FIX: the original unpacked into the undefined name `null`
        # ((args.dataDir, null) = os.path.split(...)), which raised a
        # NameError whenever -d was omitted. Also fall back to '.' when
        # the inventory path has no directory component, so a bare
        # filename does not yield an empty data directory.
        args.dataDir = os.path.dirname(args.inventory) or '.'

    if not args.field:
        args.field = 'name'

    if args.verbose:
        print('verbosity turned on')
        print('reading inventory from {}'.format(args.inventory))
        print('fileNames read from field named {}'.format(args.field))
        print('searching for files in {}'.format(args.dataDir))
    # end: argument parsing

    inventory = pd.read_csv(args.inventory, usecols=[args.field])
    fileNames = inventory[args.field]
    foundfiles = 0
    missingfiles = 0
    for fileName in fileNames:
        # os.path.join is portable; the original concatenated with '/'.
        if os.path.isfile(os.path.join(args.dataDir, fileName)):
            if args.verbose:
                print('{} is not missing'.format(fileName))
            foundfiles += 1
        else:
            print('{} is missing'.format(fileName))
            missingfiles += 1

    print('{} files found and {} files missing'.format(foundfiles, missingfiles))
    # Returning the counts (previously an implicit None) keeps existing
    # callers working while making the result programmatically usable.
    return foundfiles, missingfiles


if __name__ == "__main__":
    main()

compareTwoKeysInCommunity.py

Lines changed: 121 additions & 122 deletions
Original file line numberDiff line numberDiff line change
@@ -1,122 +1,121 @@
1-
import json
2-
import requests
3-
import secrets
4-
import csv
5-
import time
6-
import urllib3
7-
import argparse
8-
9-
secretsVersion = raw_input('To edit production server, enter the name of the secrets file: ')
10-
if secretsVersion != '':
11-
try:
12-
secrets = __import__(secretsVersion)
13-
print 'Editing Production'
14-
except ImportError:
15-
print 'Editing Stage'
16-
else:
17-
print 'Editing Stage'
18-
19-
parser = argparse.ArgumentParser()
20-
parser.add_argument('-1', '--key', help='the first key to be output. optional - if not provided, the script will ask for input')
21-
parser.add_argument('-2', '--key2', help='the second key to be output. optional - if not provided, the script will ask for input')
22-
parser.add_argument('-i', '--handle', help='handle of the community to retreive. optional - if not provided, the script will ask for input')
23-
args = parser.parse_args()
24-
25-
if args.key:
26-
key = args.key
27-
else:
28-
key = raw_input('Enter first key: ')
29-
if args.key2:
30-
key2 = args.key2
31-
else:
32-
key2 = raw_input('Enter second key: ')
33-
if args.handle:
34-
handle = args.handle
35-
else:
36-
handle = raw_input('Enter community handle: ')
37-
38-
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
39-
40-
41-
baseURL = secrets.baseURL
42-
email = secrets.email
43-
password = secrets.password
44-
filePath = secrets.filePath
45-
verify = secrets.verify
46-
skippedCollections = secrets.skippedCollections
47-
48-
startTime = time.time()
49-
data = {'email':email,'password':password}
50-
header = {'content-type':'application/json','accept':'application/json'}
51-
session = requests.post(baseURL+'/rest/login', headers=header, verify=verify, params=data).cookies['JSESSIONID']
52-
cookies = {'JSESSIONID': session}
53-
headerFileUpload = {'accept':'application/json'}
54-
cookiesFileUpload = cookies
55-
status = requests.get(baseURL+'/rest/status', headers=header, cookies=cookies, verify=verify).json()
56-
print 'authenticated'
57-
58-
endpoint = baseURL+'/rest/handle/'+handle
59-
community = requests.get(endpoint, headers=header, cookies=cookies, verify=verify).json()
60-
communityID = community['uuid']
61-
62-
itemList = []
63-
endpoint = baseURL+'/rest/communities'
64-
collections = requests.get(baseURL+'/rest/communities/'+str(communityID)+'/collections', headers=header, cookies=cookies, verify=verify).json()
65-
for j in range (0, len (collections)):
66-
collectionID = collections[j]['uuid']
67-
print collectionID
68-
if collectionID not in skippedCollections:
69-
offset = 0
70-
items = ''
71-
while items != []:
72-
items = requests.get(baseURL+'/rest/collections/'+str(collectionID)+'/items?limit=200&offset='+str(offset), headers=header, cookies=cookies, verify=verify)
73-
while items.status_code != 200:
74-
time.sleep(5)
75-
items = requests.get(baseURL+'/rest/collections/'+str(collectionID)+'/items?limit=200&offset='+str(offset), headers=header, cookies=cookies, verify=verify)
76-
items = items.json()
77-
for k in range (0, len (items)):
78-
itemID = items[k]['uuid']
79-
itemList.append(itemID)
80-
offset = offset + 200
81-
print offset
82-
elapsedTime = time.time() - startTime
83-
m, s = divmod(elapsedTime, 60)
84-
h, m = divmod(m, 60)
85-
print 'Item list creation time: ','%d:%02d:%02d' % (h, m, s)
86-
87-
valueList = []
88-
for number, itemID in enumerate(itemList):
89-
itemsRemaining = len(itemList) - number
90-
print 'Items remaining: ', itemsRemaining, 'ItemID: ', itemID
91-
metadata = requests.get(baseURL+'/rest/items/'+str(itemID)+'/metadata', headers=header, cookies=cookies, verify=verify).json()
92-
itemTuple = (itemID,)
93-
tupleValue1 = ''
94-
tupleValue2 = ''
95-
for l in range (0, len (metadata)):
96-
if metadata[l]['key'] == key:
97-
metadataValue = metadata[l]['value'].encode('utf-8')
98-
tupleValue1 = metadataValue
99-
if metadata[l]['key'] == key2:
100-
metadataValue = metadata[l]['value'].encode('utf-8')
101-
tupleValue2 = metadataValue
102-
itemTuple = itemTuple + (tupleValue1 , tupleValue2)
103-
valueList.append(itemTuple)
104-
print itemTuple
105-
print valueList
106-
107-
elapsedTime = time.time() - startTime
108-
m, s = divmod(elapsedTime, 60)
109-
h, m = divmod(m, 60)
110-
print 'Value list creation time: ','%d:%02d:%02d' % (h, m, s)
111-
112-
f=csv.writer(open(filePath+key+'-'+key2+'Values.csv', 'wb'))
113-
f.writerow(['itemID']+[key]+[key2])
114-
for i in range (0, len (valueList)):
115-
f.writerow([valueList[i][0]]+[valueList[i][1]]+[valueList[i][2]])
116-
117-
logout = requests.post(baseURL+'/rest/logout', headers=header, cookies=cookies, verify=verify)
118-
119-
elapsedTime = time.time() - startTime
120-
m, s = divmod(elapsedTime, 60)
121-
h, m = divmod(m, 60)
122-
print 'Total script run time: ', '%d:%02d:%02d' % (h, m, s)
import json
import requests
import secrets
import csv
import time
import urllib3
import argparse

# --- configuration / credentials -------------------------------------------
# An empty reply keeps the already-imported `secrets` module (stage server);
# entering the name of another importable secrets file switches to production.
# NOTE(review): `secrets` is also a Python 3 stdlib module name; this relies
# on a local secrets.py shadowing it on sys.path -- confirm.
secretsVersion = input('To edit production server, enter the name of the secrets file: ')
if secretsVersion != '':
    try:
        secrets = __import__(secretsVersion)
        print('Editing Production')
    except ImportError:
        print('Editing Stage')
else:
    print('Editing Stage')

parser = argparse.ArgumentParser()
parser.add_argument('-1', '--key', help='the first key to be output. optional - if not provided, the script will ask for input')
parser.add_argument('-2', '--key2', help='the second key to be output. optional - if not provided, the script will ask for input')
parser.add_argument('-i', '--handle', help='handle of the community to retrieve. optional - if not provided, the script will ask for input')
args = parser.parse_args()

# Fall back to interactive prompts for any argument not supplied on the CLI.
if args.key:
    key = args.key
else:
    key = input('Enter first key: ')
if args.key2:
    key2 = args.key2
else:
    key2 = input('Enter second key: ')
if args.handle:
    handle = args.handle
else:
    handle = input('Enter community handle: ')

# TLS verification is controlled by secrets.verify; suppress the warning
# spam that requests/urllib3 emit when it is disabled.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

baseURL = secrets.baseURL
email = secrets.email
password = secrets.password
filePath = secrets.filePath
verify = secrets.verify
skippedCollections = secrets.skippedCollections

startTime = time.time()

# --- authenticate against the REST API -------------------------------------
data = {'email':email,'password':password}
header = {'content-type':'application/json','accept':'application/json'}
session = requests.post(baseURL+'/rest/login', headers=header, verify=verify, params=data).cookies['JSESSIONID']
cookies = {'JSESSIONID': session}
headerFileUpload = {'accept':'application/json'}

status = requests.get(baseURL+'/rest/status', headers=header, cookies=cookies, verify=verify).json()
print('authenticated')

# --- resolve the community and gather every item ID it contains ------------
endpoint = baseURL+'/rest/handle/'+handle
community = requests.get(endpoint, headers=header, cookies=cookies, verify=verify).json()
communityID = community['uuid']

itemList = []
endpoint = baseURL+'/rest/communities'
collections = requests.get(baseURL+'/rest/communities/'+str(communityID)+'/collections', headers=header, cookies=cookies, verify=verify).json()
for collection in collections:
    collectionID = collection['uuid']
    print(collectionID)
    if collectionID not in skippedCollections:
        # Page through the collection 200 items at a time until an empty
        # page comes back; retry after a pause on any non-200 response.
        offset = 0
        items = ''
        while items != []:
            items = requests.get(baseURL+'/rest/collections/'+str(collectionID)+'/items?limit=200&offset='+str(offset), headers=header, cookies=cookies, verify=verify)
            while items.status_code != 200:
                time.sleep(5)
                items = requests.get(baseURL+'/rest/collections/'+str(collectionID)+'/items?limit=200&offset='+str(offset), headers=header, cookies=cookies, verify=verify)
            items = items.json()
            for item in items:
                itemList.append(item['uuid'])
            offset = offset + 200
            print(offset)
elapsedTime = time.time() - startTime
m, s = divmod(elapsedTime, 60)
h, m = divmod(m, 60)
print('Item list creation time: ','%d:%02d:%02d' % (h, m, s))

# --- pull the two requested metadata values for every item ------------------
valueList = []
for number, itemID in enumerate(itemList):
    itemsRemaining = len(itemList) - number
    print('Items remaining: ', itemsRemaining, 'ItemID: ', itemID)
    metadata = requests.get(baseURL+'/rest/items/'+str(itemID)+'/metadata', headers=header, cookies=cookies, verify=verify).json()
    tupleValue1 = ''
    tupleValue2 = ''
    for entry in metadata:
        # If a key occurs more than once, the last occurrence wins
        # (same behavior as the original loop).
        if entry['key'] == key:
            tupleValue1 = entry['value']
        if entry['key'] == key2:
            tupleValue2 = entry['value']
    itemTuple = (itemID, tupleValue1, tupleValue2)
    valueList.append(itemTuple)
    print(itemTuple)
print(valueList)

elapsedTime = time.time() - startTime
m, s = divmod(elapsedTime, 60)
h, m = divmod(m, 60)
print('Value list creation time: ','%d:%02d:%02d' % (h, m, s))

# --- write the results -------------------------------------------------------
# BUG FIX: the original left the file handle open for the life of the
# process and omitted newline='', which the Python 3 csv docs require for
# files passed to csv.writer (otherwise blank rows appear on Windows).
with open(filePath+key+'-'+key2+'Values.csv', 'w', newline='') as csvFile:
    f = csv.writer(csvFile)
    f.writerow(['itemID']+[key]+[key2])
    for row in valueList:
        f.writerow([row[0]]+[row[1]]+[row[2]])

logout = requests.post(baseURL+'/rest/logout', headers=header, cookies=cookies, verify=verify)

elapsedTime = time.time() - startTime
m, s = divmod(elapsedTime, 60)
h, m = divmod(m, 60)
print('Total script run time: ', '%d:%02d:%02d' % (h, m, s))

0 commit comments

Comments
 (0)