
Commit

new black format
igorbrigadir committed Dec 21, 2022
1 parent 1d959b9 commit a7e5204
Showing 9 changed files with 0 additions and 22 deletions.
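Each of the 22 deleted lines is a blank line that sat directly after a block-opening statement (a def, if, else, for, while, with, or except line), which the new black format no longer keeps, so rerunning black produced this whitespace-only diff. A minimal before/after sketch of the pattern, adapted from the handshake.py hunk below (the return line is added here only to make the snippet self-contained):

# Before: a blank line directly follows the block opener.
def handshake():

    # Default empty keys
    consumer_key = ""
    return consumer_key

# After the new black format: the blank line is removed; nothing else changes.
def handshake():
    # Default empty keys
    consumer_key = ""
    return consumer_key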
2 changes: 0 additions & 2 deletions twarc/client.py

@@ -139,7 +139,6 @@ def search(
         reached_end = False

         while True:
-
             # note: max_id changes as results are retrieved
             if max_id:
                 params["max_id"] = max_id

@@ -724,7 +723,6 @@ def replies(self, tweet, recursive=False, prune=()):
         tweet_id = tweet["id_str"]
         log.info("looking for replies to: %s", tweet_id)
         for reply in self.search("to:%s" % screen_name, since_id=tweet_id):
-
             if reply["in_reply_to_status_id_str"] != tweet_id:
                 continue

4 changes: 0 additions & 4 deletions twarc/client2.py

@@ -256,7 +256,6 @@ def _search(
         if using_counts:
             while True:
                 for response in self.get_paginated(url, params=params):
-
                     # Note that we're ensuring the appropriate amount of sleep is
                     # taken before yielding every item. This ensures that we won't
                     # exceed the rate limit even in cases where a response generator

@@ -309,7 +308,6 @@ def _search(

         else:
             for response in self.get_paginated(url, params=params):
-
                 # Note that we're ensuring the appropriate amount of sleep is
                 # taken before yielding every item. This ensures that we won't
                 # exceed the rate limit even in cases where a response generator

@@ -914,7 +912,6 @@ def tweet_lookup(
         """

         def lookup_batch(tweet_id):
-
             url = "https://api.twitter.com/2/tweets"

             params = self._prepare_params(

@@ -1653,7 +1650,6 @@ def get_paginated(self, *args, **kwargs):
             token_param = "next_token"

         while "meta" in page and "next_token" in page["meta"]:
-
             if "params" in kwargs:
                 kwargs["params"][token_param] = page["meta"]["next_token"]
             else:
1 change: 0 additions & 1 deletion twarc/command.py

@@ -291,7 +291,6 @@ def stop(signal, frame):
     line_count = 0
     file_count = 0
     for thing in things:
-
         # rotate the files if necessary
         if args.output and args.split and line_count % args.split == 0:
             file_count += 1
8 changes: 0 additions & 8 deletions twarc/command2.py

@@ -416,7 +416,6 @@ def _validate_max_results(context, parameter, value):
     )

     if value:
-
         if not archive_set and value > 100:
             raise click.BadParameter(
                 "--max-results cannot be greater than 100 when using Standard Access. Specify --archive if you have Academic Access."

@@ -431,7 +430,6 @@ def _validate_max_results(context, parameter, value):
         return value

     else:
-
         if archive_set and (
             no_context_annotations_set
             or minimal_fields_set

@@ -1490,7 +1488,6 @@ def timelines(
             break

         for user in users:
-
             # only process a given user once
             if user in seen:
                 log.info("already processed %s, skipping", user)

@@ -1704,7 +1701,6 @@ def searches(
     # TODO: Needs an inputlines progress bar instead, as the queries are variable
     # size.
     with FileLineProgressBar(infile, outfile, disable=hide_progress) as progress:
-
         merged_query = ""
         extended_query = None
         query = None

@@ -1755,7 +1751,6 @@ def searches(
                 response = api_method(issue_query, **kwargs)

                 for result in response:
-
                     if counts_only:
                         for r in result["data"]:
                             click.echo(

@@ -1780,7 +1775,6 @@ def searches(
                 response = api_method(merged_query, **kwargs)

                 for result in response:
-
                     if counts_only:
                         for r in result["data"]:
                             click.echo(

@@ -1902,7 +1896,6 @@ def f():
         conv_count = 0

         for conv_id in conv_ids:
-
             if conv_id in seen:
                 log.info(f"already fetched conversation_id {conv_id}")
             seen.add(conv_id)

@@ -2805,7 +2798,6 @@ def _wait_for_job(T, job, hide_progress=False):
         disable=hide_progress,
         bar_format="{l_bar}{bar}| Waiting {n_time}/{total_time}{postfix}",
     ) as pbar:
-
         while True:
             try:
                 pbar.refresh()
2 changes: 0 additions & 2 deletions twarc/decorators2.py

@@ -29,7 +29,6 @@ def new_f(*args, **kwargs):
                 errors = 0
                 return resp
             elif resp.status_code == 429:
-
                 # Check the headers, and try to infer why we're hitting the
                 # rate limit. Because the search/all endpoints also have a
                 # 1r/s rate limit that isn't obvious in the headers, we need

@@ -132,7 +131,6 @@ def new_f(self, *args, **kwargs):
                 errors = 0
                 return resp
             except (requests.exceptions.RequestException, ConnectionError) as e:
-
                 # don't catch any HTTP errors since those are handled separately
                 if isinstance(e, requests.exceptions.HTTPError):
                     raise e
1 change: 0 additions & 1 deletion twarc/handshake.py

@@ -9,7 +9,6 @@


 def handshake():
-
     # Default empty keys
     consumer_key = ""
     consumer_secret = ""
1 change: 0 additions & 1 deletion utils/extractor.py

@@ -119,7 +119,6 @@ def extract(json_object, args, csv_writer):
         found = found1

     for row in found:
-
         csv_writer.writerow(row)
     return len(found)

2 changes: 0 additions & 2 deletions utils/media2warc.py

@@ -76,7 +76,6 @@ def __init__(self, out_queue, warcfile):
         self.dedup = Dedup()

     def run(self):
-
         with open(self.warcfile, "ab") as output:
             while True:
                 self.lock.acquire()

@@ -157,7 +156,6 @@ def parse_extended_entities(extended_entities_dict):

     if "media" in extended_entities_dict.keys():
         for item in extended_entities_dict["media"]:
-
             # add static image
             urls.append(item["media_url_https"])

1 change: 0 additions & 1 deletion utils/media_urls.py

@@ -22,7 +22,6 @@

     if "extended_entities" in tweet and "media" in tweet["extended_entities"]:
         for media in tweet["extended_entities"]["media"]:
-
             if media["type"] == "animated_gif":
                 print(id, media["media_url_https"])

